# ========== FILE: /IronPythonStubs/release/stubs.min/System/Diagnostics/__init___parts/DebuggerBrowsableState.py ==========
# source repo: shnlmn/Rhino-Grasshopper-Scripts | license: MIT
class DebuggerBrowsableState(Enum, IComparable, IFormattable, IConvertible):
"""
Provides display instructions for the debugger.
 enum DebuggerBrowsableState, values: Collapsed (2), Never (0), RootHidden (3)
"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
Collapsed=None
Never=None
RootHidden=None
value__=None
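
# For illustration only: a rough pure-Python equivalent of the value mapping
# documented above (hypothetical, not part of the generated stubs; assumes a
# Python 3 interpreter with the stdlib enum module rather than IronPython):
#
#   import enum
#
#   class DebuggerBrowsableStateSketch(enum.IntEnum):
#       Never = 0
#       Collapsed = 2
#       RootHidden = 3
#
#   DebuggerBrowsableStateSketch.Collapsed.value   # -> 2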

# ========== FILE: /1450.number-of-students-doing-homework-at-a-given-time.py ==========
# source repo: Code-Wen/LeetCode_Notes | license: none
#
# @lc app=leetcode id=1450 lang=python3
#
# [1450] Number of Students Doing Homework at a Given Time
#
# https://leetcode.com/problems/number-of-students-doing-homework-at-a-given-time/description/
#
# algorithms
# Easy (82.11%)
# Likes: 55
# Dislikes: 14
# Total Accepted: 13.4K
# Total Submissions: 16.3K
# Testcase Example: '[1,2,3]\n[3,2,7]\n4'
#
# Given two integer arrays startTime and endTime and given an integer
# queryTime.
#
# The ith student started doing their homework at the time startTime[i] and
# finished it at time endTime[i].
#
# Return the number of students doing their homework at time queryTime. More
# formally, return the number of students where queryTime lays in the interval
# [startTime[i], endTime[i]] inclusive.
#
#
# Example 1:
#
#
# Input: startTime = [1,2,3], endTime = [3,2,7], queryTime = 4
# Output: 1
# Explanation: We have 3 students where:
# The first student started doing homework at time 1 and finished at time 3 and
# wasn't doing anything at time 4.
# The second student started doing homework at time 2 and finished at time 2
# and also wasn't doing anything at time 4.
# The third student started doing homework at time 3 and finished at time 7 and
# was the only student doing homework at time 4.
#
#
# Example 2:
#
#
# Input: startTime = [4], endTime = [4], queryTime = 4
# Output: 1
# Explanation: The only student was doing their homework at the queryTime.
#
#
# Example 3:
#
#
# Input: startTime = [4], endTime = [4], queryTime = 5
# Output: 0
#
#
# Example 4:
#
#
# Input: startTime = [1,1,1,1], endTime = [1,3,2,4], queryTime = 7
# Output: 0
#
#
# Example 5:
#
#
# Input: startTime = [9,8,7,6,5,4,3,2,1], endTime =
# [10,10,10,10,10,10,10,10,10], queryTime = 5
# Output: 5
#
#
#
# Constraints:
#
#
# startTime.length == endTime.length
# 1 <= startTime.length <= 100
# 1 <= startTime[i] <= endTime[i] <= 1000
# 1 <= queryTime <= 1000
#
#
#
# @lc code=start
from typing import List


class Solution:
    def busyStudent(self, startTime: List[int], endTime: List[int], queryTime: int) -> int:
        # count the students whose [startTime, endTime] interval covers queryTime
        return sum(s <= queryTime <= e for s, e in zip(startTime, endTime))
# @lc code=end
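
# A quick sanity check against the examples above (illustrative; not part of
# the submitted solution):
if __name__ == '__main__':
    sol = Solution()
    assert sol.busyStudent([1, 2, 3], [3, 2, 7], 4) == 1  # Example 1
    assert sol.busyStudent([4], [4], 4) == 1              # Example 2
    assert sol.busyStudent([4], [4], 5) == 0              # Example 3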

# ========== FILE: /app/teacher/homework/test_cases/test003_home_item_list.py ==========
# source repo: MerdLead/test_andriod_app | license: none
#!/usr/bin/env python
import unittest
from app.teacher.homework.object_page.release_hw_page import ReleasePage
from app.teacher.login.object_page.home_page import ThomePage
from app.teacher.login.object_page.login_page import TloginPage
from app.teacher.homework.object_page.homework_detail_page import HwDetailPage
from app.teacher.login.test_data.login_failed_toast import VALID_LOGIN_TOAST
from conf.decorator import setup, teardown, testcase
from utils.toast_find import Toast
class HomeItem(unittest.TestCase):
"""首页列表"""
@classmethod
@setup
def setUp(cls):
"""启动应用"""
cls.login = TloginPage()
cls.home = ThomePage()
cls.homework = ReleasePage()
cls.detail = HwDetailPage()
@classmethod
@teardown
def tearDown(cls):
pass
@testcase
def test_home_item_list(self):
        self.login.app_status()  # check the app's current state
        if self.home.wait_check_page():  # page checkpoint
if self.home.wait_check_no_page():
                print('No new activity -- (User Guide) Welcome to the online teaching assistant, take a look!')
else:
                var = self.home.hw_list_operate([])  # homework list
while True:
self.detail.screen_swipe_up(0.5, 0.85, 0.5, 1000)
                    var = self.home.hw_list_operate(var[0])  # homework list
if int(var[1]) == 1:
break
while True:
if self.home.wait_check_image_page():
break
else:
self.homework.screen_swipe_down(0.5, 0.1, 0.85, 1000)
else:
Toast().find_toast(VALID_LOGIN_TOAST.login_failed())
print("未进入主界面")

# ========== FILE: /get_important_work.py ==========
# source repo: JingkaiTang/github-play | license: none
#! /usr/bin/env python
def man(str_arg):
try_thing(str_arg)
print('ask_week_by_great_week')
def try_thing(str_arg):
print(str_arg)
if __name__ == '__main__':
man('week')

# ========== FILE: /net/data/crl_unittest/generate_crl_test_data.py ==========
# source repo: amitsadaphule/chromium | license: BSD-3-Clause
#!/usr/bin/python
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script is called without any arguments to re-generate all of the *.pem
files in the script's directory.
The https://github.com/google/der-ascii tools must be in the PATH.
These tests assume that the verification time will be 2017-03-09 00:00:00 GMT
and verified with a max CRL age of 7 days.
"""
import datetime
import subprocess
import os
from OpenSSL import crypto
import base64
HEADER = "Generated by %s. Do not edit." % os.path.split(__file__)[1]
NEXT_SERIAL = 0
# 2017-01-01 00:00 GMT
CERT_DATE = datetime.datetime(2017, 1, 1, 0, 0)
# 2018-01-01 00:00 GMT
CERT_EXPIRE = CERT_DATE + datetime.timedelta(days=365)
def DictUnion(a, b):
return dict(a.items() + b.items())
def Der2Ascii(txt):
p = subprocess.Popen(['der2ascii'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout_data, stderr_data = p.communicate(txt)
if p.returncode:
raise RuntimeError('der2ascii returned %i: %s' % (p.returncode,
stderr_data))
return stdout_data
def Ascii2Der(txt):
p = subprocess.Popen(['ascii2der'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout_data, stderr_data = p.communicate(txt)
if p.returncode:
raise RuntimeError('ascii2der returned %i: %s' % (p.returncode,
stderr_data))
return stdout_data
def Ascii2OpensslDer(txt):
der = Ascii2Der(txt)
return 'DER:' + ''.join(['%02X' % ord(b) for b in der])
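
# Round-trip sanity check for the helpers above (assumes the der2ascii and
# ascii2der binaries from https://github.com/google/der-ascii are on PATH):
#
#   Ascii2Der('INTEGER { 5 }')            # -> '\x02\x01\x05'
#   Der2Ascii('\x02\x01\x05')             # -> 'INTEGER { 5 }\n'
#   Ascii2OpensslDer('INTEGER { 5 }')     # -> 'DER:020105'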
def CreateCert(name, signer, pkey=None, crl_dp=None, key_usage=None):
global NEXT_SERIAL
if pkey is None:
pkey = crypto.PKey()
pkey.generate_key(crypto.TYPE_RSA, 1024)
cert = crypto.X509()
cert.set_version(2)
cert.get_subject().CN = name
cert.set_pubkey(pkey)
cert.set_serial_number(NEXT_SERIAL)
NEXT_SERIAL += 1
cert.set_notBefore(CERT_DATE.strftime('%Y%m%d%H%M%SZ'))
cert.set_notAfter(CERT_EXPIRE.strftime('%Y%m%d%H%M%SZ'))
if crl_dp:
cert.add_extensions(
[crypto.X509Extension('crlDistributionPoints', False, crl_dp)])
if key_usage:
cert.add_extensions(
[crypto.X509Extension('keyUsage', False, key_usage)])
if signer:
cert.set_issuer(signer['cert'].get_subject())
cert.sign(signer['pkey'], 'sha256')
else:
cert.set_issuer(cert.get_subject())
cert.sign(pkey, 'sha256')
result = dict(cert=cert, pkey=pkey)
if not signer:
signer = result
result['signer'] = signer
return result
ROOT_CA = CreateCert('Test CA', None)
# Multiple versions of the intermediate. All use the same name and private key.
CA = CreateCert('Test Intermediate CA', ROOT_CA,
key_usage='critical, keyCertSign, cRLSign')
CA_NO_KEYUSAGE = CreateCert('Test Intermediate CA', ROOT_CA,
pkey=CA['pkey'], key_usage=None)
CA_KEYUSAGE_NOCRLSIGN = CreateCert('Test Intermediate CA', ROOT_CA,
pkey=CA['pkey'],
key_usage='critical, keyCertSign')
# A different CA with a different name and key.
OTHER_CA = CreateCert('Test Other Intermediate CA', ROOT_CA)
# The target cert, with a simple crlDistributionPoints pointing to an arbitrary
# URL, other crlDistributionPoints fields not set.
LEAF = CreateCert('Test Cert', CA, crl_dp='URI:http://example.com/foo.crl')
# The target cert, no crlDistributionPoints.
LEAF_NO_CRLDP = CreateCert('Test Cert', CA)
# The target cert, crlDistributionPoints with crlIssuer and
# crlDistributionPoints set.
LEAF_CRLDP_CRLISSUER = CreateCert('Test Cert', CA,
# It doesn't seem like you can set crlIssuers through the one-line openssl
# interface, so just do it manually.
crl_dp=Ascii2OpensslDer('''
SEQUENCE {
SEQUENCE {
[0] {
[0] {
[6 PRIMITIVE] { "http://example.com/foo.crl" }
}
}
[2] {
[4] {
SEQUENCE {
SET {
SEQUENCE {
# commonName
OBJECT_IDENTIFIER { 2.5.4.3 }
UTF8String { "Test CRL Issuer CA" }
}
}
}
}
}
}
}
'''))
# Self-issued intermediate with a new key signed by the |CA| key.
CA_NEW_BY_OLD = CreateCert('Test Intermediate CA', CA,
key_usage='critical, keyCertSign, cRLSign')
# Target cert signed by |CA_NEW_BY_OLD|'s key.
LEAF_BY_NEW = CreateCert(
'Test Cert', CA_NEW_BY_OLD, crl_dp='URI:http://example.com/foo.crl')
def SignAsciiCRL(tbs_inner_txt, signer=CA):
tbs_txt = 'SEQUENCE {\n%s\n}' % tbs_inner_txt
tbs_der = Ascii2Der(tbs_txt)
signature = crypto.sign(signer['pkey'], tbs_der, 'sha256')
crl_text = '''
SEQUENCE {
%s
SEQUENCE {
# sha256WithRSAEncryption
OBJECT_IDENTIFIER { 1.2.840.113549.1.1.11 }
NULL {}
}
BIT_STRING { `00%s` }
}
''' % (tbs_txt, signature.encode('hex'))
CRL = Ascii2Der(crl_text)
return CRL
def MakePemBlock(der, name):
text = Der2Ascii(der).rstrip('\n')
b64 = base64.b64encode(der)
wrapped = '\n'.join(b64[pos:pos + 64] for pos in xrange(0, len(b64), 64))
return '%s\n-----BEGIN %s-----\n%s\n-----END %s-----' % (
text, name, wrapped, name)
def WriteStringToFile(data, path):
with open(path, "w") as f:
f.write(data)
def Store(fname, description, leaf, ca, crl_der, ca2=None):
ca_cert_der = crypto.dump_certificate(crypto.FILETYPE_ASN1, ca['cert'])
cert_der = crypto.dump_certificate(crypto.FILETYPE_ASN1, leaf['cert'])
out = '\n\n'.join([
HEADER,
description,
MakePemBlock(crl_der, 'CRL'),
MakePemBlock(ca_cert_der, 'CA CERTIFICATE'),
MakePemBlock(cert_der, 'CERTIFICATE')])
if ca2:
ca_cert_2_der = crypto.dump_certificate(crypto.FILETYPE_ASN1, ca2['cert'])
out += '\n\n' + MakePemBlock(ca_cert_2_der, 'CA CERTIFICATE 2')
open('%s.pem' % fname, 'w').write(out)
crl_strings = {
'sha256WithRSAEncryption': '''
SEQUENCE {
OBJECT_IDENTIFIER { 1.2.840.113549.1.1.11 }
NULL {}
}
''',
'sha384WithRSAEncryption': '''
SEQUENCE {
OBJECT_IDENTIFIER { 1.2.840.113549.1.1.12 }
NULL {}
}
''',
'CA_name': '''
SEQUENCE {
SET {
SEQUENCE {
# commonName
OBJECT_IDENTIFIER { 2.5.4.3 }
UTF8String { "Test Intermediate CA" }
}
}
}
''',
'thisUpdate': 'UTCTime { "170302001122Z" }',
'nextUpdate': 'UTCTime { "170602001122Z" }',
'thisUpdateGeneralized': 'GeneralizedTime { "20170302001122Z" }',
'nextUpdateGeneralized': 'GeneralizedTime { "20170602001122Z" }',
'thisUpdate_too_old': 'UTCTime { "170301001122Z" }',
'thisUpdate_in_future': 'UTCTime { "170310001122Z" }',
'nextUpdate_too_old': 'UTCTime { "170308001122Z" }',
'leaf_revoked': '''
SEQUENCE {
SEQUENCE {
INTEGER { %i }
UTCTime { "170201001122Z" }
# no crlEntryExtensions
}
SEQUENCE {
INTEGER { %i }
UTCTime { "170201001122Z" }
# no crlEntryExtensions
}
SEQUENCE {
INTEGER { %i }
UTCTime { "170201001122Z" }
# no crlEntryExtensions
}
}
''' % (LEAF['cert'].get_serial_number() + 100,
LEAF['cert'].get_serial_number(),
LEAF['cert'].get_serial_number() + 101),
'leaf_revoked_fake_extension': '''
SEQUENCE {
SEQUENCE {
INTEGER { %i }
UTCTime { "170201001122Z" }
# no crlEntryExtensions
}
SEQUENCE {
INTEGER { %i }
UTCTime { "170201001122Z" }
SEQUENCE {
SEQUENCE {
OBJECT_IDENTIFIER { 1.2.3.4 }
OCTET_STRING { `5678` }
}
}
}
SEQUENCE {
INTEGER { %i }
UTCTime { "170201001122Z" }
# no crlEntryExtensions
}
}
''' % (LEAF['cert'].get_serial_number() + 100,
LEAF['cert'].get_serial_number(),
LEAF['cert'].get_serial_number() + 101),
'leaf_revoked_before_fake_critical_extension': '''
SEQUENCE {
SEQUENCE {
INTEGER { %i }
UTCTime { "170201001122Z" }
# leaf revocation entry has no crlEntryExtensions
}
SEQUENCE {
INTEGER { %i }
UTCTime { "170201001122Z" }
# next revocation entry has a critical crlEntryExtension
SEQUENCE {
SEQUENCE {
OBJECT_IDENTIFIER { 1.2.3.4 }
BOOLEAN { `ff` }
OCTET_STRING { `5678` }
}
}
}
}
''' % (LEAF['cert'].get_serial_number(),
LEAF['cert'].get_serial_number() + 101),
'leaf_revoked_generalizedtime': '''
SEQUENCE {
SEQUENCE {
INTEGER { %i }
GeneralizedTime { "20170201001122Z" }
# no crlEntryExtensions
}
SEQUENCE {
INTEGER { %i }
GeneralizedTime { "20170201001122Z" }
# no crlEntryExtensions
}
SEQUENCE {
INTEGER { %i }
GeneralizedTime { "20170201001122Z" }
# no crlEntryExtensions
}
}
''' % (LEAF['cert'].get_serial_number() + 100,
LEAF['cert'].get_serial_number(),
LEAF['cert'].get_serial_number() + 101),
'fake_extension': '''
SEQUENCE {
OBJECT_IDENTIFIER { 1.2.3.4 }
OCTET_STRING { `5678` }
}
''',
'fake_critical_extension': '''
SEQUENCE {
OBJECT_IDENTIFIER { 1.2.3.4 }
BOOLEAN { `ff` }
OCTET_STRING { `5678` }
}
''',
}
Store(
'good',
'Leaf covered by CRLs and not revoked',
LEAF, CA,
SignAsciiCRL('''
INTEGER { 1 }
%(sha256WithRSAEncryption)s
%(CA_name)s
%(thisUpdate)s
%(nextUpdate)s
# no revoked certs list
# no crlExtensions
''' % crl_strings))
Store(
'good_issuer_name_normalization',
'Good, non-revoked, but issuer name in CRL requires case folding',
LEAF, CA,
SignAsciiCRL('''
INTEGER { 1 }
%(sha256WithRSAEncryption)s
SEQUENCE {
SET {
SEQUENCE {
# commonName
OBJECT_IDENTIFIER { 2.5.4.3 }
# Name that requires case folding and type conversion.
PrintableString { "tEST iNTERMEDIATE ca" }
}
}
}
%(thisUpdate)s
%(nextUpdate)s
# no revoked certs list
# no crlExtensions
''' % crl_strings))
Store(
'good_issuer_no_keyusage',
'Leaf covered by CRLs and not revoked, issuer has no keyUsage extension',
LEAF, CA_NO_KEYUSAGE,
SignAsciiCRL('''
INTEGER { 1 }
%(sha256WithRSAEncryption)s
%(CA_name)s
%(thisUpdate)s
%(nextUpdate)s
# no revoked certs list
# no crlExtensions
''' % crl_strings, signer=CA_NO_KEYUSAGE))
Store(
'good_no_nextupdate',
'Leaf covered by CRLs and not revoked, optional nextUpdate field is absent',
LEAF, CA,
SignAsciiCRL('''
INTEGER { 1 }
%(sha256WithRSAEncryption)s
%(CA_name)s
%(thisUpdate)s
# no nextUpdate
# no revoked certs list
# no crlExtensions
''' % crl_strings))
Store(
'good_fake_extension',
'Leaf covered by CRLs and not revoked, CRL has an irrelevant non-critical '
'extension',
LEAF, CA,
SignAsciiCRL('''
INTEGER { 1 }
%(sha256WithRSAEncryption)s
%(CA_name)s
%(thisUpdate)s
%(nextUpdate)s
# no revoked certs list
[0] {
SEQUENCE {
%(fake_extension)s
}
}
''' % crl_strings))
Store(
'good_fake_extension_no_nextupdate',
'Leaf covered by CRLs and not revoked, CRL has an irrelevant non-critical '
'extension',
LEAF, CA,
SignAsciiCRL('''
INTEGER { 1 }
%(sha256WithRSAEncryption)s
%(CA_name)s
%(thisUpdate)s
# no nextUpdate
# no revoked certs list
[0] {
SEQUENCE {
%(fake_extension)s
}
}
''' % crl_strings))
Store(
'good_generalizedtime',
'Leaf covered by CRLs and not revoked, dates encoded as GeneralizedTime',
LEAF, CA,
SignAsciiCRL('''
INTEGER { 1 }
%(sha256WithRSAEncryption)s
%(CA_name)s
%(thisUpdateGeneralized)s
%(nextUpdateGeneralized)s
# no revoked certs list
# no crlExtensions
''' % crl_strings))
Store(
'good_no_version',
'Leaf covered by CRLs and not revoked, CRL is V1',
LEAF, CA,
SignAsciiCRL('''
# no version
%(sha256WithRSAEncryption)s
%(CA_name)s
%(thisUpdate)s
%(nextUpdate)s
# no revoked certs list
# no crlExtensions
''' % crl_strings))
Store(
'good_no_crldp',
'Leaf covered by CRLs and not revoked, leaf has no crlDistributionPoints',
LEAF_NO_CRLDP, CA,
SignAsciiCRL('''
INTEGER { 1 }
%(sha256WithRSAEncryption)s
%(CA_name)s
%(thisUpdate)s
%(nextUpdate)s
# no revoked certs list
# no crlExtensions
''' % crl_strings))
Store(
'good_key_rollover',
"Leaf issued by CA's new key but CRL is signed by old key",
LEAF_BY_NEW, CA_NEW_BY_OLD, ca2=CA,
crl_der=SignAsciiCRL('''
INTEGER { 1 }
%(sha256WithRSAEncryption)s
%(CA_name)s
%(thisUpdate)s
%(nextUpdate)s
# no revoked certs list
# no crlExtensions
''' % crl_strings))
Store(
'revoked',
'Leaf is revoked',
LEAF, CA,
SignAsciiCRL('''
INTEGER { 1 }
%(sha256WithRSAEncryption)s
%(CA_name)s
%(thisUpdate)s
%(nextUpdate)s
%(leaf_revoked)s
# no crlExtensions
''' % crl_strings))
Store(
'revoked_no_nextupdate',
'Leaf is revoked, optional nextUpdate field is absent',
LEAF, CA,
SignAsciiCRL('''
INTEGER { 1 }
%(sha256WithRSAEncryption)s
%(CA_name)s
%(thisUpdate)s
# no nextUpdate
%(leaf_revoked)s
# no crlExtensions
''' % crl_strings))
Store(
'revoked_fake_crlentryextension',
'Leaf is revoked, has non-critical crlEntryExtension',
LEAF, CA,
SignAsciiCRL('''
INTEGER { 1 }
%(sha256WithRSAEncryption)s
%(CA_name)s
%(thisUpdate)s
%(nextUpdate)s
%(leaf_revoked_fake_extension)s
# no crlExtensions
''' % crl_strings))
Store(
'revoked_generalized_revocationdate',
'Leaf is revoked, revocationDate is encoded as GeneralizedTime',
LEAF, CA,
SignAsciiCRL('''
INTEGER { 1 }
%(sha256WithRSAEncryption)s
%(CA_name)s
%(thisUpdate)s
%(nextUpdate)s
%(leaf_revoked_generalizedtime)s
# no crlExtensions
''' % crl_strings))
Store(
'revoked_key_rollover',
"Leaf issued by CA's new key but CRL is signed by old key",
LEAF_BY_NEW, CA_NEW_BY_OLD, ca2=CA,
crl_der=SignAsciiCRL('''
INTEGER { 1 }
%(sha256WithRSAEncryption)s
%(CA_name)s
%(thisUpdate)s
%(nextUpdate)s
SEQUENCE {
SEQUENCE {
INTEGER { %(LEAF_SERIAL)i }
UTCTime { "170201001122Z" }
# no crlEntryExtensions
}
}
# no crlExtensions
''' % DictUnion(crl_strings,
{'LEAF_SERIAL':LEAF_BY_NEW['cert'].get_serial_number()})))
Store(
'bad_crldp_has_crlissuer',
'Leaf covered by CRLs and not revoked, leaf has crlDistributionPoints '
'with a crlIssuer',
LEAF_CRLDP_CRLISSUER, CA,
SignAsciiCRL('''
INTEGER { 1 }
%(sha256WithRSAEncryption)s
%(CA_name)s
%(thisUpdate)s
%(nextUpdate)s
# no revoked certs list
# no crlExtensions
''' % crl_strings))
Store(
'bad_fake_critical_extension',
'Leaf covered by CRLs and not revoked, but CRL has an unhandled critical '
'extension',
LEAF, CA,
SignAsciiCRL('''
INTEGER { 1 }
%(sha256WithRSAEncryption)s
%(CA_name)s
%(thisUpdate)s
# no nextUpdate
# no revoked certs list
[0] {
SEQUENCE {
%(fake_critical_extension)s
}
}
''' % crl_strings))
Store(
'bad_fake_critical_crlentryextension',
'Leaf is revoked, but a later entry has a critical crlEntryExtension',
LEAF, CA,
SignAsciiCRL('''
INTEGER { 1 }
%(sha256WithRSAEncryption)s
%(CA_name)s
%(thisUpdate)s
%(nextUpdate)s
%(leaf_revoked_before_fake_critical_extension)s
# no crlExtensions
''' % crl_strings))
Store(
'bad_signature',
'No revoked certs, but CRL signed by a different key',
LEAF, CA,
SignAsciiCRL('''
INTEGER { 1 }
%(sha256WithRSAEncryption)s
%(CA_name)s
%(thisUpdate)s
%(nextUpdate)s
# no revoked certs list
# no crlExtensions
''' % crl_strings, signer=OTHER_CA))
Store(
'bad_thisupdate_in_future',
'Leaf covered by CRLs and not revoked, but thisUpdate is in the future',
LEAF, CA,
SignAsciiCRL('''
INTEGER { 1 }
%(sha256WithRSAEncryption)s
%(CA_name)s
%(thisUpdate_in_future)s
%(nextUpdate)s
# no revoked certs list
# no crlExtensions
''' % crl_strings))
Store(
'bad_thisupdate_too_old',
'Leaf covered by CRLs and not revoked, but thisUpdate time is more than '
'7 days before verification time',
LEAF, CA,
SignAsciiCRL('''
INTEGER { 1 }
%(sha256WithRSAEncryption)s
%(CA_name)s
%(thisUpdate_too_old)s
%(nextUpdate)s
# no revoked certs list
# no crlExtensions
''' % crl_strings))
Store(
'bad_nextupdate_too_old',
'Leaf covered by CRLs and not revoked, but nextUpdate time is before '
'verification time',
LEAF, CA,
SignAsciiCRL('''
INTEGER { 1 }
%(sha256WithRSAEncryption)s
%(CA_name)s
%(thisUpdate)s
%(nextUpdate_too_old)s
# no revoked certs list
# no crlExtensions
''' % crl_strings))
Store(
'bad_wrong_issuer',
'issuer name in CRL is different',
LEAF, CA,
SignAsciiCRL('''
INTEGER { 1 }
%(sha256WithRSAEncryption)s
SEQUENCE {
SET {
SEQUENCE {
# commonName
OBJECT_IDENTIFIER { 2.5.4.3 }
PrintableString { "Test Unrelated CA" }
}
}
}
%(thisUpdate)s
%(nextUpdate)s
# no revoked certs list
# no crlExtensions
''' % crl_strings))
Store(
'bad_key_rollover_signature',
"Leaf issued by CA's new key which is signed by old key, but CRL isn't "
"signed by either",
LEAF_BY_NEW, CA_NEW_BY_OLD, ca2=CA,
crl_der=SignAsciiCRL('''
INTEGER { 1 }
%(sha256WithRSAEncryption)s
%(CA_name)s
%(thisUpdate)s
%(nextUpdate)s
# no revoked certs list
# no crlExtensions
''' % crl_strings, signer=OTHER_CA))
Store(
'invalid_mismatched_signature_algorithm',
'Leaf covered by CRLs and not revoked, but signatureAlgorithm in '
'CertificateList does not match the one in TBSCertList.',
LEAF, CA,
SignAsciiCRL('''
INTEGER { 1 }
%(sha384WithRSAEncryption)s
%(CA_name)s
%(thisUpdate)s
%(nextUpdate)s
# no revoked certs list
# no crlExtensions
''' % crl_strings))
Store(
'invalid_revoked_empty_sequence',
'revokedCertificates is an empty sequence (should be omitted)',
LEAF, CA,
SignAsciiCRL('''
INTEGER { 1 }
%(sha256WithRSAEncryption)s
%(CA_name)s
%(thisUpdate)s
%(nextUpdate)s
SEQUENCE {
# no revoked certs. revokedCertificates should be omitted in this case.
}
# no crlExtensions
''' % crl_strings))
Store(
'invalid_v1_with_extension',
'CRL is V1 and has crlExtensions',
LEAF, CA,
SignAsciiCRL('''
# no version
%(sha256WithRSAEncryption)s
%(CA_name)s
%(thisUpdate)s
# no nextUpdate
# no revoked certs list
[0] {
SEQUENCE {
%(fake_extension)s
}
}
''' % crl_strings))
Store(
'invalid_v1_with_crlentryextension',
'Leaf is revoked, has non-critical crlEntryExtension, but CRL is V1',
LEAF, CA,
SignAsciiCRL('''
# no version
%(sha256WithRSAEncryption)s
%(CA_name)s
%(thisUpdate)s
%(nextUpdate)s
%(leaf_revoked_fake_extension)s
# no crlExtensions
''' % crl_strings))
Store(
'invalid_v1_explicit',
'CRL has explicit V1 version',
LEAF, CA,
SignAsciiCRL('''
INTEGER { 0 }
%(sha256WithRSAEncryption)s
%(CA_name)s
%(thisUpdate)s
%(nextUpdate)s
# no revoked certs list
# no crlExtensions
''' % crl_strings))
Store(
'invalid_v3',
'CRL has invalid V3 version',
LEAF, CA,
SignAsciiCRL('''
INTEGER { 2 }
%(sha256WithRSAEncryption)s
%(CA_name)s
%(thisUpdate)s
%(nextUpdate)s
# no revoked certs list
# no crlExtensions
''' % crl_strings))
Store(
'invalid_issuer_keyusage_no_crlsign',
'Leaf covered by CRLs and not revoked, issuer has keyUsage extension '
'without the cRLSign bit set',
LEAF, CA_KEYUSAGE_NOCRLSIGN,
SignAsciiCRL('''
INTEGER { 1 }
%(sha256WithRSAEncryption)s
%(CA_name)s
%(thisUpdate)s
%(nextUpdate)s
# no revoked certs list
# no crlExtensions
''' % crl_strings, signer=CA_KEYUSAGE_NOCRLSIGN))
Store(
'invalid_key_rollover_issuer_keyusage_no_crlsign',
"Leaf issued by CA's new key but CRL is signed by old key, and the old "
"key cert has keyUsage extension without the cRLSign bit set",
LEAF_BY_NEW, CA_NEW_BY_OLD, ca2=CA_KEYUSAGE_NOCRLSIGN,
crl_der=SignAsciiCRL('''
INTEGER { 1 }
%(sha256WithRSAEncryption)s
%(CA_name)s
%(thisUpdate)s
%(nextUpdate)s
# no revoked certs list
# no crlExtensions
''' % crl_strings, signer=CA_KEYUSAGE_NOCRLSIGN))
Store(
'invalid_garbage_version',
'CRL version is garbage',
LEAF, CA,
SignAsciiCRL('''
OCTET_STRING { `01` }
%(sha256WithRSAEncryption)s
%(CA_name)s
%(thisUpdate)s
%(nextUpdate)s
# no revoked certs list
# no crlExtensions
''' % crl_strings))
Store(
'invalid_garbage_tbs_signature_algorithm',
'CRL tbs signature algorithm is garbage',
LEAF, CA,
SignAsciiCRL('''
INTEGER { 1 }
INTEGER { 1 }
%(CA_name)s
%(thisUpdate)s
%(nextUpdate)s
# no revoked certs list
# no crlExtensions
''' % crl_strings))
Store(
'invalid_garbage_issuer_name',
'CRL issuer is garbage',
LEAF, CA,
SignAsciiCRL('''
INTEGER { 1 }
%(sha256WithRSAEncryption)s
INTEGER { 1 }
%(thisUpdate)s
# no revoked certs list
# no crlExtensions
''' % crl_strings))
Store(
'invalid_garbage_thisupdate',
'CRL thisUpdate is garbage',
LEAF, CA,
SignAsciiCRL('''
INTEGER { 1 }
%(sha256WithRSAEncryption)s
%(CA_name)s
INTEGER { 1 }
%(thisUpdate)s
# no revoked certs list
# no crlExtensions
''' % crl_strings))
Store(
'invalid_garbage_after_thisupdate',
'CRL garbage after thisupdate',
LEAF, CA,
SignAsciiCRL('''
INTEGER { 1 }
%(sha256WithRSAEncryption)s
%(CA_name)s
%(thisUpdate)s
# garbage:
INTEGER { 1 }
''' % crl_strings))
Store(
'invalid_garbage_after_nextupdate',
'CRL garbage after nextUpdate',
LEAF, CA,
SignAsciiCRL('''
INTEGER { 1 }
%(sha256WithRSAEncryption)s
%(CA_name)s
%(thisUpdate)s
%(nextUpdate)s
# garbage:
INTEGER { 1 }
''' % crl_strings))
Store(
'invalid_garbage_after_revokedcerts',
'CRL garbage after revokedCertificates',
LEAF, CA,
SignAsciiCRL('''
INTEGER { 1 }
%(sha256WithRSAEncryption)s
%(CA_name)s
%(thisUpdate)s
# no nextUpdate
%(leaf_revoked)s
# no crlExtensions
# garbage: nextUpdate doesn't go here:
%(nextUpdate)s
''' % crl_strings))
Store(
'invalid_garbage_after_extensions',
'CRL garbage after extensions',
LEAF, CA,
SignAsciiCRL('''
INTEGER { 1 }
%(sha256WithRSAEncryption)s
%(CA_name)s
%(thisUpdate)s
%(nextUpdate)s
# no revoked certs list
[0] {
SEQUENCE {
%(fake_extension)s
}
}
# Garbage: revoked certs sequence doesn't go here:
%(leaf_revoked)s
''' % crl_strings))
Store(
'invalid_garbage_tbscertlist',
'CRL garbage tbsCertList',
LEAF, CA,
Ascii2Der('''
SEQUENCE {
OCTET_STRING { `5678` }
SEQUENCE {
# sha256WithRSAEncryption
OBJECT_IDENTIFIER { 1.2.840.113549.1.1.11 }
NULL {}
}
# Actual signatureValue doesn't matter, shouldn't get to verifying signature.
BIT_STRING { `001a` }
}
'''))
Store(
'invalid_garbage_signaturealgorithm',
'CRL garbage signatureAlgorithm',
LEAF, CA,
Ascii2Der('''
SEQUENCE {
SEQUENCE {
INTEGER { 1 }
# tbsCertList contents doesn't matter, parsing shouldn't get this far.
}
OCTET_STRING { `5678` }
# Actual signatureValue doesn't matter, shouldn't get to verifying signature.
BIT_STRING { `001a` }
}
'''))
Store(
'invalid_garbage_signaturevalue',
'CRL garbage signatureValue',
LEAF, CA,
Ascii2Der('''
SEQUENCE {
SEQUENCE {
INTEGER { 1 }
# tbsCertList contents doesn't matter, parsing shouldn't get this far.
}
SEQUENCE {
# sha256WithRSAEncryption
OBJECT_IDENTIFIER { 1.2.840.113549.1.1.11 }
NULL {}
}
# Actual signatureValue contents don't matter, should be BIT_STRING rather
# than OCTET_STRING.
OCTET_STRING { `001a` }
}
'''))
Store(
'invalid_garbage_after_signaturevalue',
'CRL garbage after signatureValue',
LEAF, CA,
Ascii2Der('''
SEQUENCE {
SEQUENCE {
INTEGER { 1 }
# tbsCertList contents doesn't matter, parsing shouldn't get this far.
}
SEQUENCE {
# sha256WithRSAEncryption
OBJECT_IDENTIFIER { 1.2.840.113549.1.1.11 }
NULL {}
}
# Actual signatureValue doesn't matter, shouldn't get to verifying signature.
BIT_STRING { `001a` }
SEQUENCE {}
}
'''))
Store(
'invalid_garbage_revoked_serial_number',
'Leaf is revoked but a following crlentry is garbage',
LEAF, CA,
SignAsciiCRL('''
INTEGER { 1 }
%(sha256WithRSAEncryption)s
%(CA_name)s
%(thisUpdate)s
%(nextUpdate)s
SEQUENCE {
SEQUENCE {
INTEGER { %(LEAF_SERIAL)i }
UTCTime { "170201001122Z" }
# no crlEntryExtensions
}
SEQUENCE {
OCTET_STRING { `7F`}
UTCTime { "170201001122Z" }
# no crlEntryExtensions
}
}
# no crlExtensions
''' % (DictUnion(crl_strings,
{'LEAF_SERIAL':LEAF['cert'].get_serial_number()}))))
Store(
'invalid_garbage_revocationdate',
'Leaf is revoked but a following crlentry is garbage',
LEAF, CA,
SignAsciiCRL('''
INTEGER { 1 }
%(sha256WithRSAEncryption)s
%(CA_name)s
%(thisUpdate)s
%(nextUpdate)s
SEQUENCE {
SEQUENCE {
INTEGER { %(LEAF_SERIAL)i }
UTCTime { "170201001122Z" }
# no crlEntryExtensions
}
SEQUENCE {
INTEGER { 100001 }
OCTET_STRING { "170201001122Z" }
# no crlEntryExtensions
}
}
# no crlExtensions
''' % (DictUnion(crl_strings,
{'LEAF_SERIAL':LEAF['cert'].get_serial_number()}))))
Store(
'invalid_garbage_after_revocationdate',
'Leaf is revoked but a following crlentry is garbage',
LEAF, CA,
SignAsciiCRL('''
INTEGER { 1 }
%(sha256WithRSAEncryption)s
%(CA_name)s
%(thisUpdate)s
%(nextUpdate)s
SEQUENCE {
SEQUENCE {
INTEGER { %(LEAF_SERIAL)i }
UTCTime { "170201001122Z" }
# no crlEntryExtensions
}
SEQUENCE {
INTEGER { 100001 }
UTCTime { "170201001122Z" }
INTEGER { 01 }
}
}
# no crlExtensions
''' % (DictUnion(crl_strings,
{'LEAF_SERIAL':LEAF['cert'].get_serial_number()}))))
Store(
'invalid_garbage_after_crlentryextensions',
'Leaf is revoked but a following crlentry is garbage',
LEAF, CA,
SignAsciiCRL('''
INTEGER { 1 }
%(sha256WithRSAEncryption)s
%(CA_name)s
%(thisUpdate)s
%(nextUpdate)s
SEQUENCE {
SEQUENCE {
INTEGER { %(LEAF_SERIAL)i }
UTCTime { "170201001122Z" }
# no crlEntryExtensions
}
SEQUENCE {
INTEGER { 100001 }
UTCTime { "170201001122Z" }
SEQUENCE {
SEQUENCE {
OBJECT_IDENTIFIER { 1.2.3.4 }
OCTET_STRING { `5678` }
}
}
INTEGER { 01 }
}
}
# no crlExtensions
''' % (DictUnion(crl_strings,
{'LEAF_SERIAL':LEAF['cert'].get_serial_number()}))))
Store(
'invalid_garbage_crlentry',
'Leaf is revoked but a following crlentry is garbage',
LEAF, CA,
SignAsciiCRL('''
INTEGER { 1 }
%(sha256WithRSAEncryption)s
%(CA_name)s
%(thisUpdate)s
%(nextUpdate)s
SEQUENCE {
SEQUENCE {
INTEGER { %(LEAF_SERIAL)i }
UTCTime { "170201001122Z" }
# no crlEntryExtensions
}
INTEGER { 01 }
}
# no crlExtensions
''' % (DictUnion(crl_strings,
{'LEAF_SERIAL':LEAF['cert'].get_serial_number()}))))

# ========== FILE: /setup.py ==========
# source repo: ammubhave/sqlalchemy-simql | license: none
#!/usr/bin/env python
from setuptools import setup
setup(
author='Amol Bhave',
author_email='ambhave' '@' 'mit.edu',
description='SQLAlchemy extension for SimQL',
entry_points="""
[sqlalchemy.dialects]
simql = sqlalchemy_simql.dialect:SimqlDialect
""",
install_requires=[
'SQLAlchemy>=0.9.4',
],
long_description=open('README.md', 'rt').read(),
name='SQLAlchemy-SimQL',
packages=[
'sqlalchemy_simql',
'sqlalchemy_simql.dbapi2',
'sqlalchemy_simql.dialect',
],
url='https://github.com/ammubhave/sqlalchemy-simql',
version='0.1.0',
)

# ========== FILE: /.install/.backup/platform/gsutil/gslib/third_party/oauth2_plugin/oauth2_client.py ==========
# source repo: bopopescu/google-cloud-sdk | license: Apache-2.0
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An OAuth2 client library.
This library provides a client implementation of the OAuth2 protocol (see
https://developers.google.com/storage/docs/authentication.html#oauth).
**** Experimental API ****
This module is experimental and is subject to modification or removal without
notice.
"""
# This implementation is a wrapper around the oauth2client implementation
# that implements caching of access tokens independent of refresh
# tokens (in the python API client oauth2client, there is a single class that
# encapsulates both refresh and access tokens).
import cgi
import socks
import datetime
import errno
from hashlib import sha1
import httplib2
import logging
import multiprocessing
import os
import tempfile
import urllib
import urlparse
from boto import cacerts
from boto import config
from gslib.util import CreateLock
from gslib.util import Retry
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import HAS_CRYPTO
from oauth2client.client import OAuth2Credentials
if HAS_CRYPTO:
from oauth2client.client import SignedJwtAssertionCredentials
try:
import json
except ImportError:
try:
# Try to import from django, should work on App Engine
from django.utils import simplejson as json
except ImportError:
# Try for simplejson
import simplejson as json
global token_exchange_lock
def InitializeMultiprocessingVariables():
"""
Perform necessary initialization - see
gslib.command.InitializeMultiprocessingVariables for an explanation of why
this is necessary.
"""
global token_exchange_lock
# Lock used for checking/exchanging refresh token so that a parallelized
# operation doesn't attempt concurrent refreshes.
token_exchange_lock = CreateLock()
LOG = logging.getLogger('oauth2_client')
GSUTIL_DEFAULT_SCOPE = 'https://www.googleapis.com/auth/devstorage.full_control'
class TokenCache(object):
"""Interface for OAuth2 token caches."""
def PutToken(self, key, value):
raise NotImplementedError
def GetToken(self, key):
raise NotImplementedError
class NoopTokenCache(TokenCache):
"""A stub implementation of TokenCache that does nothing."""
def PutToken(self, key, value):
pass
def GetToken(self, key):
return None
class InMemoryTokenCache(TokenCache):
"""An in-memory token cache.
The cache is implemented by a python dict, and inherits the thread-safety
properties of dict.
"""
def __init__(self):
super(InMemoryTokenCache, self).__init__()
self.cache = dict()
def PutToken(self, key, value):
LOG.debug('InMemoryTokenCache.PutToken: key=%s', key)
self.cache[key] = value
def GetToken(self, key):
value = self.cache.get(key, None)
LOG.debug('InMemoryTokenCache.GetToken: key=%s%s present',
key, ' not' if value is None else '')
return value
class FileSystemTokenCache(TokenCache):
"""An implementation of a token cache that persists tokens on disk.
Each token object in the cache is stored in serialized form in a separate
file. The cache file's name can be configured via a path pattern that is
parameterized by the key under which a value is cached and optionally the
current processes uid as obtained by os.getuid().
Since file names are generally publicly visible in the system, it is important
that the cache key does not leak information about the token's value. If
client code computes cache keys from token values, a cryptographically strong
one-way function must be used.
"""
def __init__(self, path_pattern=None):
"""Creates a FileSystemTokenCache.
Args:
path_pattern: Optional string argument to specify the path pattern for
cache files. The argument should be a path with format placeholders
'%(key)s' and optionally '%(uid)s'. If the argument is omitted, the
default pattern
<tmpdir>/oauth2client-tokencache.%(uid)s.%(key)s
is used, where <tmpdir> is replaced with the system temp dir as
obtained from tempfile.gettempdir().
"""
super(FileSystemTokenCache, self).__init__()
self.path_pattern = path_pattern
if not path_pattern:
self.path_pattern = os.path.join(
tempfile.gettempdir(), 'oauth2_client-tokencache.%(uid)s.%(key)s')
def CacheFileName(self, key):
uid = '_'
try:
# os.getuid() doesn't seem to work in Windows
uid = str(os.getuid())
except:
pass
return self.path_pattern % {'key': key, 'uid': uid}
def PutToken(self, key, value):
"""Serializes the value to the key's filename.
To ensure that written tokens aren't leaked to a different users, we
a) unlink an existing cache file, if any (to ensure we don't fall victim
to symlink attacks and the like),
b) create a new file with O_CREAT | O_EXCL (to ensure nobody is trying to
race us)
If either of these steps fail, we simply give up (but log a warning). Not
caching access tokens is not catastrophic, and failure to create a file
can happen for either of the following reasons:
- someone is attacking us as above, in which case we want to default to
safe operation (not write the token);
- another legitimate process is racing us; in this case one of the two
will win and write the access token, which is fine;
- we don't have permission to remove the old file or write to the
specified directory, in which case we can't recover
Args:
key: the hash key to store.
value: the access_token value to serialize.
"""
cache_file = self.CacheFileName(key)
LOG.debug('FileSystemTokenCache.PutToken: key=%s, cache_file=%s',
key, cache_file)
try:
os.unlink(cache_file)
except:
# Ignore failure to unlink the file; if the file exists and can't be
# unlinked, the subsequent open with O_CREAT | O_EXCL will fail.
pass
flags = os.O_RDWR | os.O_CREAT | os.O_EXCL
# Accommodate Windows; stolen from python2.6/tempfile.py.
if hasattr(os, 'O_NOINHERIT'):
flags |= os.O_NOINHERIT
if hasattr(os, 'O_BINARY'):
flags |= os.O_BINARY
try:
fd = os.open(cache_file, flags, 0600)
except (OSError, IOError) as e:
LOG.warning('FileSystemTokenCache.PutToken: '
'Failed to create cache file %s: %s', cache_file, e)
return
f = os.fdopen(fd, 'w+b')
f.write(value.Serialize())
f.close()
def GetToken(self, key):
"""Returns a deserialized access token from the key's filename."""
value = None
cache_file = self.CacheFileName(key)
try:
f = open(cache_file)
value = AccessToken.UnSerialize(f.read())
f.close()
except (IOError, OSError) as e:
if e.errno != errno.ENOENT:
LOG.warning('FileSystemTokenCache.GetToken: '
'Failed to read cache file %s: %s', cache_file, e)
except Exception as e:
LOG.warning('FileSystemTokenCache.GetToken: '
'Failed to read cache file %s (possibly corrupted): %s',
cache_file, e)
LOG.debug('FileSystemTokenCache.GetToken: key=%s%s present (cache_file=%s)',
key, ' not' if value is None else '', cache_file)
return value
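
# A minimal sketch of using the file-system cache directly (the path pattern is
# illustrative; keys would normally come from OAuth2Client.CacheKey, and
# AccessToken is defined further down in this module):
#
#   cache = FileSystemTokenCache(
#       path_pattern='/var/tmp/mytool-tokencache.%(uid)s.%(key)s')
#   cache.PutToken('deadbeef', AccessToken('abc', None))
#   cache.GetToken('deadbeef')   # -> AccessToken(token=abc, expiry=NoneZ)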
class OAuth2Client(object):
"""Common logic for OAuth2 clients."""
def __init__(self, cache_key_base, access_token_cache=None,
datetime_strategy=datetime.datetime, auth_uri=None,
token_uri=None, disable_ssl_certificate_validation=False,
proxy_host=None, proxy_port=None, ca_certs_file=None):
# datetime_strategy is used to invoke utcnow() on; it is injected into the
# constructor for unit testing purposes.
self.auth_uri = auth_uri
self.token_uri = token_uri
self.cache_key_base = cache_key_base
self.datetime_strategy = datetime_strategy
self.access_token_cache = access_token_cache or InMemoryTokenCache()
self.disable_ssl_certificate_validation = disable_ssl_certificate_validation
self.ca_certs_file = ca_certs_file
if proxy_host and proxy_port:
self._proxy_info = httplib2.ProxyInfo(socks.PROXY_TYPE_HTTP,
proxy_host,
proxy_port,
proxy_rdns=True)
else:
self._proxy_info = None
def CreateHttpRequest(self):
return httplib2.Http(
ca_certs=self.ca_certs_file,
disable_ssl_certificate_validation
= self.disable_ssl_certificate_validation,
proxy_info=self._proxy_info)
def GetAccessToken(self):
"""Obtains an access token for this client.
This client's access token cache is first checked for an existing,
not-yet-expired access token. If none is found, the client obtains a fresh
access token from the OAuth2 provider's token endpoint.
Returns:
The cached or freshly obtained AccessToken.
Raises:
AccessTokenRefreshError if an error occurs.
"""
# Ensure only one thread at a time attempts to get (and possibly refresh)
# the access token. This doesn't prevent concurrent refresh attempts across
# multiple gsutil instances, but at least protects against multiple threads
# simultaneously attempting to refresh when gsutil -m is used.
token_exchange_lock.acquire()
try:
cache_key = self.CacheKey()
LOG.debug('GetAccessToken: checking cache for key %s', cache_key)
access_token = self.access_token_cache.GetToken(cache_key)
LOG.debug('GetAccessToken: token from cache: %s', access_token)
if access_token is None or access_token.ShouldRefresh():
LOG.debug('GetAccessToken: fetching fresh access token...')
access_token = self.FetchAccessToken()
LOG.debug('GetAccessToken: fresh access token: %s', access_token)
self.access_token_cache.PutToken(cache_key, access_token)
return access_token
finally:
token_exchange_lock.release()
def CacheKey(self):
"""Computes a cache key.
The cache key is computed as the SHA1 hash of the refresh token for user
accounts, or the hash of the gs_service_client_id for service accounts,
which satisfies the FileSystemTokenCache requirement that cache keys do not
leak information about token values.
Returns:
A hash key.
"""
h = sha1()
h.update(self.cache_key_base)
return h.hexdigest()
def GetAuthorizationHeader(self):
"""Gets the access token HTTP authorization header value.
Returns:
The value of an Authorization HTTP header that authenticates
requests with an OAuth2 access token.
"""
return 'Bearer %s' % self.GetAccessToken().token
class OAuth2ServiceAccountClient(OAuth2Client):
def __init__(self, client_id, private_key, password,
access_token_cache=None, auth_uri=None, token_uri=None,
datetime_strategy=datetime.datetime,
disable_ssl_certificate_validation=False,
proxy_host=None, proxy_port=None, ca_certs_file=None):
"""Creates an OAuth2ServiceAccountClient.
Args:
client_id: The OAuth2 client ID of this client.
private_key: The private key associated with this service account.
password: The private key password used for the crypto signer.
access_token_cache: An optional instance of a TokenCache. If omitted or
None, an InMemoryTokenCache is used.
auth_uri: The URI for OAuth2 authorization.
token_uri: The URI used to refresh access tokens.
datetime_strategy: datetime module strategy to use.
disable_ssl_certificate_validation: True if certifications should not be
validated.
proxy_host: An optional string specifying the host name of an HTTP proxy
to be used.
proxy_port: An optional int specifying the port number of an HTTP proxy
to be used.
ca_certs_file: The cacerts.txt file to use.
"""
super(OAuth2ServiceAccountClient, self).__init__(
cache_key_base=client_id, auth_uri=auth_uri, token_uri=token_uri,
access_token_cache=access_token_cache,
datetime_strategy=datetime_strategy,
disable_ssl_certificate_validation=disable_ssl_certificate_validation,
proxy_host=proxy_host, proxy_port=proxy_port,
ca_certs_file=ca_certs_file)
self.client_id = client_id
self.private_key = private_key
self.password = password
def FetchAccessToken(self):
credentials = SignedJwtAssertionCredentials(self.client_id,
self.private_key, scope=GSUTIL_DEFAULT_SCOPE,
private_key_password=self.password)
http = self.CreateHttpRequest()
credentials.refresh(http)
return AccessToken(credentials.access_token,
credentials.token_expiry, datetime_strategy=self.datetime_strategy)
class GsAccessTokenRefreshError(Exception):
  """Rate limiting error when exchanging refresh token for access token."""
  def __init__(self, e):
    super(GsAccessTokenRefreshError, self).__init__(e)
class GsInvalidRefreshTokenError(Exception):
  """Error for a refresh token that is rejected as invalid."""
  def __init__(self, e):
    super(GsInvalidRefreshTokenError, self).__init__(e)
class OAuth2UserAccountClient(OAuth2Client):
"""An OAuth2 client."""
def __init__(self, token_uri, client_id, client_secret, refresh_token,
auth_uri=None, access_token_cache=None,
datetime_strategy=datetime.datetime,
disable_ssl_certificate_validation=False,
proxy_host=None, proxy_port=None, ca_certs_file=None):
"""Creates an OAuth2UserAccountClient.
Args:
token_uri: The URI used to refresh access tokens.
client_id: The OAuth2 client ID of this client.
client_secret: The OAuth2 client secret of this client.
refresh_token: The token used to refresh the access token.
auth_uri: The URI for OAuth2 authorization.
access_token_cache: An optional instance of a TokenCache. If omitted or
None, an InMemoryTokenCache is used.
datetime_strategy: datetime module strategy to use.
disable_ssl_certificate_validation: True if certifications should not be
validated.
proxy_host: An optional string specifying the host name of an HTTP proxy
to be used.
proxy_port: An optional int specifying the port number of an HTTP proxy
to be used.
ca_certs_file: The cacerts.txt file to use.
"""
super(OAuth2UserAccountClient, self).__init__(
cache_key_base=refresh_token, auth_uri=auth_uri, token_uri=token_uri,
access_token_cache=access_token_cache,
datetime_strategy=datetime_strategy,
disable_ssl_certificate_validation=disable_ssl_certificate_validation,
proxy_host=proxy_host, proxy_port=proxy_port,
ca_certs_file=ca_certs_file)
self.token_uri = token_uri
self.client_id = client_id
self.client_secret = client_secret
self.refresh_token = refresh_token
@Retry(GsAccessTokenRefreshError,
tries=config.get('OAuth2', 'oauth2_refresh_retries', 6),
timeout_secs=1)
def FetchAccessToken(self):
"""Fetches an access token from the provider's token endpoint.
Fetches an access token from this client's OAuth2 provider's token endpoint.
Returns:
The fetched AccessToken.
"""
try:
http = self.CreateHttpRequest()
credentials = OAuth2Credentials(None, self.client_id, self.client_secret,
self.refresh_token, None, self.token_uri, None)
credentials.refresh(http)
return AccessToken(credentials.access_token,
credentials.token_expiry, datetime_strategy=self.datetime_strategy)
except AccessTokenRefreshError, e:
if 'Invalid response 403' in e.message:
# This is the most we can do at the moment to accurately detect rate
# limiting errors since they come back as 403s with no further
# information.
raise GsAccessTokenRefreshError(e)
elif 'invalid_grant' in e.message:
LOG.info("""
Attempted to retrieve an access token from an invalid refresh token. Two common
cases in which you will see this error are:
1. Your refresh token was revoked.
2. Your refresh token was typed incorrectly.
""")
raise GsInvalidRefreshTokenError(e)
else:
raise
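
# A minimal end-to-end sketch (all credential values are placeholders; the
# module-level token_exchange_lock must be initialized before the first call
# to GetAccessToken):
#
#   InitializeMultiprocessingVariables()
#   client = OAuth2UserAccountClient(
#       token_uri='https://accounts.google.com/o/oauth2/token',
#       client_id='<client-id>', client_secret='<client-secret>',
#       refresh_token='<refresh-token>',
#       access_token_cache=FileSystemTokenCache())
#   auth_header = client.GetAuthorizationHeader()   # 'Bearer ya29....'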
class AccessToken(object):
"""Encapsulates an OAuth2 access token."""
def __init__(self, token, expiry, datetime_strategy=datetime.datetime):
self.token = token
self.expiry = expiry
self.datetime_strategy = datetime_strategy
@staticmethod
def UnSerialize(query):
"""Creates an AccessToken object from its serialized form."""
def GetValue(d, key):
return (d.get(key, [None]))[0]
kv = cgi.parse_qs(query)
if not kv['token']:
return None
expiry = None
expiry_tuple = GetValue(kv, 'expiry')
if expiry_tuple:
try:
expiry = datetime.datetime(
*[int(n) for n in expiry_tuple.split(',')])
except:
return None
return AccessToken(GetValue(kv, 'token'), expiry)
def Serialize(self):
"""Serializes this object as URI-encoded key-value pairs."""
# There's got to be a better way to serialize a datetime. Unfortunately,
# there is no reliable way to convert into a unix epoch.
kv = {'token': self.token}
if self.expiry:
t = self.expiry
tupl = (t.year, t.month, t.day, t.hour, t.minute, t.second, t.microsecond)
kv['expiry'] = ','.join([str(i) for i in tupl])
return urllib.urlencode(kv)
def ShouldRefresh(self, time_delta=300):
"""Whether the access token needs to be refreshed.
Args:
time_delta: refresh access token when it expires within time_delta secs.
Returns:
True if the token is expired or about to expire, False if the
token should be expected to work. Note that the token may still
be rejected, e.g. if it has been revoked server-side.
"""
if self.expiry is None:
return False
return (self.datetime_strategy.utcnow()
+ datetime.timedelta(seconds=time_delta) > self.expiry)
def __eq__(self, other):
return self.token == other.token and self.expiry == other.expiry
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return 'AccessToken(token=%s, expiry=%sZ)' % (self.token, self.expiry)
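
# Serialization round trip (illustrative values; key order in the serialized
# string is not guaranteed):
#
#   tok = AccessToken('abc123', datetime.datetime(2014, 1, 2, 3, 4, 5, 6))
#   s = tok.Serialize()            # e.g. 'token=abc123&expiry=2014,1,2,3,4,5,6'
#   AccessToken.UnSerialize(s) == tok   # -> True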

# ========== FILE: /arquivos-py/CursoEmVideo_Python3_DESAFIOS/desafio06.py ==========
# source repo: oliveiralecca/cursoemvideo-python3 | license: MIT
print('======= DESAFIO 06 =======')
n = int(input('Enter a number: '))
d = n * 2
t = n * 3
rq = n ** (1 / 2)  # or: pow(n, (1/2))
print('Double: {}\nTriple: {}\nSquare root: {:.2f}'.format(d, t, rq))

# ========== FILE: /augment/augment_intersect_Brent9060_v11.py ==========
# source repo: ekourkchi/HI | license: none
#!/usr/bin/python
# encoding=utf8
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import os
import subprocess
import math
import matplotlib.pyplot as plt
import numpy as np
import pylab as py
from astropy.table import Table, Column
import random
from scipy.optimize import curve_fit
from astropy.stats import sigma_clip
import sqlcl
import urllib2
import astropy.coordinates as coord
import astropy.units as u
# This is the default circular velocity and LSR peculiar velocity of the Sun
# TODO: make this a config item?
VCIRC = 220. # u.km/u.s
VLSR = [10., 5.25, 7.17] # *u.km/u.s
######################################
def vgsr_to_vhel(gl, gb, vgsr, vcirc=VCIRC, vlsr=VLSR):
l = gl*np.pi/180.
b = gb*np.pi/180.
lsr = vgsr - vcirc*np.sin(l)*np.cos(b)
v_correct = vlsr[0]*np.cos(b)*np.cos(l) + \
vlsr[1]*np.cos(b)*np.sin(l) + \
vlsr[2]*np.sin(b)
vhel = lsr - v_correct
return vhel
######################################
def vhel_to_vgsr(gl, gb, vhel, vcirc=VCIRC, vlsr=VLSR):
l = gl*np.pi/180.
b = gb*np.pi/180.
    if not isinstance(vhel, u.Quantity):
        raise TypeError("vhel must be a Quantity subclass")
lsr = vhel + vcirc*np.sin(l)*np.cos(b)
v_correct = vlsr[0]*np.cos(b)*np.cos(l) + \
vlsr[1]*np.cos(b)*np.sin(l) + \
vlsr[2]*np.sin(b)
vgsr = lsr + v_correct
return vgsr
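
# Worked example for the conversion above (assumed values): a source at
# l = 90 deg, b = 0 deg with V_GSR = 300 km/s gives
# V_helio = 300 - 220*sin(90 deg) - 5.25 = 74.75 km/s:
#
#   vgsr_to_vhel(90., 0., 300.)   # -> 74.75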
######################################
def isNaN(num):
return num != num
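
# NaN is the only float value that compares unequal to itself, which is what
# isNaN exploits; e.g. isNaN(float('nan')) -> True, isNaN(1.0) -> False.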
######################################
def Vh2Vls(el, b, Vh):
    alpha = np.pi / 180.
    cosb = np.cos(b * alpha)
    sinb = np.sin(b * alpha)
    cosl = np.cos(el * alpha)
    sinl = np.sin(el * alpha)
    vls = float(Vh) - 26. * cosl * cosb + 317. * sinl * cosb - 8. * sinb
    return vls
######################################
### (another "Vlg" has been given by Courteau and van den Bergh; another by Yahil et al.)
### The Vlg used in MKgroups is their own version.
### The following function just works fine for MK-groups
def Vlg2Vls(el, b, Vlg):
    alpha = np.pi / 180.
    cosb = np.cos(b * alpha)
    sinb = np.sin(b * alpha)
    cosl = np.cos(el * alpha)
    sinl = np.sin(el * alpha)
    Vh = float(Vlg) + 16. * cosl * cosb - 315. * sinl * cosb + 22. * sinb
    vls = Vh - 26. * cosl * cosb + 317. * sinl * cosb - 8. * sinb
    return vls
######################################
### The Vlg used in MKgroups is their own version.
def Vlg2Vh(el, b, Vlg):
    alpha = np.pi / 180.
    cosb = np.cos(b * alpha)
    sinb = np.sin(b * alpha)
    cosl = np.cos(el * alpha)
    sinl = np.sin(el * alpha)
    Vh = float(Vlg) + 16. * cosl * cosb - 315. * sinl * cosb + 22. * sinb
    return Vh
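
# Consistency check for the three helpers above: converting Vlg to Vls in one
# step agrees with going through the heliocentric velocity first, i.e.
#
#   Vlg2Vls(el, b, Vlg) == Vh2Vls(el, b, Vlg2Vh(el, b, Vlg))
#
# up to floating-point round-off, for any (el, b, Vlg).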
######################################
inFile = 'wise_all.csv'
table = np.genfromtxt( inFile , delimiter=',', filling_values=None, names=True, dtype=None)
wise_name = table['ID']
wise_pgc = table['PGC']
######################################
def xcmd(cmd,verbose):
if verbose: print '\n'+cmd
tmp=os.popen(cmd)
output=''
for x in tmp: output+=x
if 'abort' in output:
failure=True
else:
failure=tmp.close()
    if False:  # note: the failure flag above is currently ignored; errors never abort
print 'execution of %s failed' % cmd
print 'error is as follows',output
sys.exit()
else:
return output
######################################
#################
class cd:
"""Context manager for changing the current working directory"""
def __init__(self, newPath):
self.newPath = os.path.expanduser(newPath)
def __enter__(self):
self.savedPath = os.getcwd()
os.chdir(self.newPath)
def __exit__(self, etype, value, traceback):
os.chdir(self.savedPath)
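
# Typical usage of the cd context manager (path illustrative): the original
# working directory is restored on exit, even if the block raises:
#
#   with cd('~/data/galaxies'):
#       xcmd('ls', True)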
###############################
def get_scales(photometry, nskip=29):
    """Reads the photometry csv file and returns the global photometric
    parameters, substituting -9.99 for any column that is missing."""
    table = np.genfromtxt(photometry, delimiter=',', filling_values=None,
                          names=True, dtype=None, skip_header=nskip)

    def grab(key):
        try:
            return float(table[key])
        except:
            return -9.99

    ebv = grab('ebv')
    a_gal = grab('a_gal')
    central_mu = grab('central_mu')
    mu_90 = grab('mu_90')
    mu_50 = grab('mu_50')
    concentration = grab('concentration')
    m_255 = grab('m_255')
    disc_mu0 = grab('disc_mu0')
    scale_length_h = grab('scale_length_h')
    R_asy = grab('R_asy')
    R_90 = grab('R_90')
    R_80 = grab('R_80')
    R_50 = grab('R_50')
    R_20 = grab('R_20')
    R_255 = grab('R_255')
    d_m_ext = grab('d_m_ext')

    return ebv, a_gal, central_mu, mu_50, mu_90, m_255, disc_mu0, scale_length_h, R_50, R_90, R_255, concentration, d_m_ext
###############################
### For a given photometry file, this returns the magnitude value
def get_mag(photometry, index=2, header=None):
if header is not None:
if os.path.exists(photometry):
with open(photometry) as f:
for line in f:
foundit = False
once = True
if line.split(" ")[0]== '#':
line_split = line.split(" ")
not_void = 0
key = None
for thing in line_split:
if thing != '':
not_void+=1
if not_void==2 and once:
key=thing
once = False
if not_void==3 and key==header:
foundit = True
break
if foundit: return np.float(thing)
if header is not None: return 0
if os.path.exists(photometry):
with open(photometry) as f:
counter = 1
for line in f:
if counter == 14:
line_split = line.split(" ")
not_void = 0
for thing in line_split:
if thing != '': not_void+=1
if not_void==index:
break
return np.float(thing)
counter+=1
###############################
###############################
### For a given photometry file, this returns the magnitude value
def get_mag_f(photometry, index=2, header=None):
if header is not None:
if os.path.exists(photometry):
with open(photometry) as f:
for line in f:
foundit = False
once = True
if line.split(" ")[0]== '#':
line_split = line.split(" ")
not_void = 0
key = None
for thing in line_split:
if thing != '':
not_void+=1
if not_void==2 and once:
key=thing
once = False
if not_void==3 and key==header:
foundit = True
break
if foundit: return np.float(thing)
if header is not None: return 0
if os.path.exists(photometry):
with open(photometry) as f:
counter = 1
for line in f:
if line.split(" ")[0]!= '#' and line.split(" ")[0]!='#\n':
line_split = line.split(" ")
not_void = 0
for thing in line_split:
if thing != '':
not_void+=1
set_param = True
if not_void==index:
break
return np.float(thing)
counter+=1
def get_mag_wise(photometry, index=2):
mag = get_mag_f(photometry, index=index)
if mag!=None:
return mag
else:
return -1000
###############################
def get_semimajor(filename):
with open(filename) as f:
counter = 1
for line in f:
if counter == 14:
line_split = line.split(" ")
not_void = 0
for thing in line_split:
if thing != '': not_void+=1
if not_void==1:
break
return np.float(thing)
counter+=1
def get_ellipse(filename):
ra_cen = -1
dec_cen = -1
semimajor = -1
semiminor = -1
PA = -1
with open(filename) as f:
counter = 1
for line in f:
if counter == 14:
line_split = line.split(" ")
not_void = 0
set_param = False
for thing in line_split:
if thing != '':
not_void+=1
set_param = True
if not_void==1 and set_param:
set_param = False
ra_cen=np.float(thing)
if not_void==2 and set_param:
dec_cen=np.float(thing)
set_param = False
if not_void==3 and set_param:
semimajor=np.float(thing)
set_param = False
if not_void==4 and set_param:
semiminor=np.float(thing)
set_param = False
if not_void==5 and set_param:
PA=np.float(thing)
break
return ra_cen, dec_cen, semimajor, semiminor, PA
counter+=1
#################################
###############################
def isInSDSS_DR12(ra, dec):
    query = "select dbo.fInFootprintEq("+str(ra)+","+str(dec)+", 1)"
    lines = sqlcl.query(query).readlines()
    if lines[2] == "True\n":
        return 1
    else:
        return 0
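### Example (requires network access through the sqlcl module; the coordinates
### are hypothetical): isInSDSS_DR12(185.0, 15.0) returns 1 if the position
### falls inside the SDSS DR12 imaging footprint, 0 otherwise.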
###############################
###############################
def get_ellipse_wise(filename):
ra_cen = -1
dec_cen = -1
semimajor = -1
semiminor = -1
PA = -1
with open(filename) as f:
counter = 1
for line in f:
if line.split(" ")[0]!= '#' and line.split(" ")[0]!='#\n': # counter == 17:
line_split = line.split(" ")
not_void = 0
set_param = False
for thing in line_split:
if thing != '':
not_void+=1
set_param = True
if not_void==1 and set_param:
set_param = False
ra_cen=np.float(thing)
if not_void==2 and set_param:
dec_cen=np.float(thing)
set_param = False
if not_void==3 and set_param:
semimajor=np.float(thing)
set_param = False
if not_void==4 and set_param:
semiminor=np.float(thing)
set_param = False
if not_void==5 and set_param:
PA=np.float(thing)
break
return ra_cen, dec_cen, semimajor, semiminor, PA
counter+=1
#################################
#################################
def get_quality(filename, nline=40):
line_no = 0
    separator = ' '
    for line in open(filename, 'r'):
        columns = line.split(separator)
line_no+=1
if len(columns) >= 2 and line_no==nline:
key = columns[0]
j = 1
while columns[j] == '' or columns[j] == '=': j+=1
return int(columns[j])
return -1
#################################
def read_note(filename):
qa_note = filename
note = ' '
if os.path.exists(qa_note):
with open(qa_note) as f:
counter = 1
for line in f:
if counter == 11:
line_split = line.split("=")
note = line_split[1]
note = note[0:len(note)-1]
counter+=1
return note
#################################
#################################
def ra_db(ra): # returns a string
ra_id = str(int(np.floor(ra)))
if ra < 10:
ra_id = '00'+ra_id+'D'
elif ra < 100:
ra_id = '0'+ra_id+'D'
else:
ra_id = ra_id+'D'
return ra_id
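### Examples: ra_db(5.3) -> '005D', ra_db(64.9) -> '064D', ra_db(250.1) -> '250D'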
#################################
#################
def QA_SDSS_DONE(pgc, ra):
    database = '/home/ehsan/db_esn/cf4_sdss/data/'
    name = 'pgc'+str(pgc)
    if os.path.exists(database+ra_db(ra)+'/sdss/fits/'+name+'_qa.txt'):
return True
return False
#################
def QA_WISE_DONE(pgc, ra):
global wise_name, wise_pgc
    database = '/home/ehsan/db_esn/cf4_wise/data/'
    if pgc in wise_pgc:
        i_lst = np.where(pgc == wise_pgc)
        name = wise_name[i_lst][0]
        if os.path.exists(database+ra_db(ra)+'/wise/fits/'+name+'_qa.txt'):
            return True
    name = 'pgc'+str(pgc)
    if os.path.exists(database+ra_db(ra)+'/wise/fits/'+name+'_qa.txt'):
return True
return False
#######################################
######################################
def rnd_inc(inc):
    if inc is None: return None
if inc>90: return 90.
if inc<45: return 0.
d_inc = inc - int(inc)
if d_inc>0.5: inc=int(inc)+1
elif d_inc<0.5: inc=int(inc)
else:
rand = random.randint(0,1)
if rand==0: inc=int(inc)
else: inc=int(inc)+1
return inc
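### Examples: rnd_inc(44.7) -> 0. (face-on cut), rnd_inc(91.2) -> 90.,
### rnd_inc(67.3) -> 67 (exact halves are broken randomly)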
######################################
def inc_append(incs, email, inc):
    ### weight of each user's vote (defaults to 1 for unknown users)
    weights = {
        'rbtully1@gmail.com': 4,
        'ekourkchi@gmail.com': 4,
        's.eftekharzadeh@gmail.com': 2,
        'mokelkea@hawaii.edu': 3,
        'chasemu@hawaii.edu': 2,
        'jrl2014@hawaii.edu': 3,
        'dschoen@hawaii.edu': 4,
        'adholtha@hawaii.edu': 4,
        'chuangj@hawaii.edu': 2,
        'mi24@hawaii.edu': 2,
        'mka7@hawaii.edu': 1,
        'a.danesh61@gmail.com': 1,
        'cgrubner0@gmail.com': 1,
        'pascal.jouve@free.fr': 2,
        'dlsaintsorny@gmail.com': 2,
        'arnaud.ohet@gmail.com': 1,
        'hawaii@udrea.fr': 1,
        'helenecourtois33@gmail.com': 4,
        'claude.rene21@gmail.com': 1,
        'fredwallet@gmail.com': 2,
        'henri140860@wanadoo.fr': 2,
        'joannin.lycee@free.fr': 2,
        'bevig434@gmail.com': 2,
        'echarraix69@gmail.com': 2,
    }
    n = weights.get(email, 1)
    for i in range(n): incs.append(inc)
    return incs
######################################
def correction(i, email):
a=1
b=0
if email=='chuangj@hawaii.edu':
a = 0.9698391552105461
b = 3.582543838111245
if email=='mi24@hawaii.edu':
a = 0.9819724300214063
b = 2.485648837307963
if email=='arnaud.ohet@gmail.com':
a = 0.8925968302721691
b = 8.021973390519326
if email=='cgrubner0@gmail.com':
a = 0.8957026107782403
b = 9.076420810780814
if email=='jrl2014@hawaii.edu':
a = 0.9350710901954157
b = 5.178922022569104
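    ### The (a, b) pairs above are per-user linear fits of measured vs. true
    ### inclination (i_measured ~ a*i_true + b); the two lines below invert
    ### that fit, so correction() maps a user's measurement back to i_true.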
a = 1./a
b = -1.*b*a # -b/a
return a*i+b
######################################
def fitFunc(x, a, b):
return a*x+b
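### Example (a sketch, assuming scipy is available; `x_data`/`y_data` are
### hypothetical arrays of per-user vs. reference inclinations):
# from scipy.optimize import curve_fit
# popt, pcov = curve_fit(fitFunc, x_data, y_data)   # popt = [a, b] for correction()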
######################################
def addNote(note, text):
if text=='': return note
if note=='':
note = '['+text+']'
else:
note = note+' '+'['+text+']'
return note
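### Examples: addNote('', 'face_on') -> '[face_on]';
### addNote('[HI]', 'bad_TF') -> '[HI] [bad_TF]'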
def addConcern(note, cncrn):
if cncrn[0]>0: note = addNote(note, 'not_sure')
if cncrn[1]>0: note = addNote(note, 'better_image')
if cncrn[2]>0: note = addNote(note, 'bad_TF')
if cncrn[3]>0: note = addNote(note, 'ambiguous')
if cncrn[4]>0: note = addNote(note, 'disturbed')
if cncrn[5]>0: note = addNote(note, 'HI')
if cncrn[6]>0: note = addNote(note, 'face_on')
if cncrn[7]>0: note = addNote(note, 'not_spiral')
if cncrn[8]>0: note = addNote(note, 'multiple')
return note
######################################
#######################################
def getINC(include_Email=None, exclude_Email=[]):
    if include_Email is None:
emails = ['rbtully1@gmail.com','ekourkchi@gmail.com','mokelkea@hawaii.edu', 'jrl2014@hawaii.edu', 'dschoen@hawaii.edu', 'adholtha@hawaii.edu']
else:
emails = include_Email
#### Manoa
inFile = 'EDD.inclination.All.Manoa.22Oct2018115942.txt'
table = np.genfromtxt(inFile , delimiter='|', filling_values=None, names=True, dtype=None)
pgc_incout = table['pgcID']
inc_incout = table['inc']
flag_incout = table['flag']
note = [' '.join(dummy.split()) for dummy in table['note']]
email = [' '.join(dummy.split()) for dummy in table['email']]
NS = table['not_sure']
BI = table['better_image']
TF = table['bad_TF']
AM = table['ambiguous']
DI = table['disturbed']
HI = table['HI']
FO = table['face_on']
NP = table['not_spiral']
MU = table['multiple']
#### Guest
inFile = 'EDD.inclination.All.Guest.22Oct2018115929.txt'
table = np.genfromtxt(inFile , delimiter='|', filling_values=None, names=True, dtype=None)
pgc_incout_ = table['pgcID']
inc_incout_ = table['inc']
flag_incout_ = table['flag']
note_ = [' '.join(dummy.split()) for dummy in table['note']]
email_ = [' '.join(dummy.split()) for dummy in table['email']]
NS_ = table['not_sure']
BI_ = table['better_image']
TF_ = table['bad_TF']
AM_ = table['ambiguous']
DI_ = table['disturbed']
HI_ = table['HI']
FO_ = table['face_on']
NP_ = table['not_spiral']
MU_ = table['multiple']
PGC = []
for i in range(len(pgc_incout)):
if not pgc_incout[i] in PGC:
PGC.append(pgc_incout[i])
for i in range(len(pgc_incout_)):
if not pgc_incout_[i] in PGC:
PGC.append(pgc_incout_[i])
incDict = {}
for i in range(len(PGC)):
data = []
indx = np.where(PGC[i] == pgc_incout)
for j in indx[0]:
if email[j] in emails and not email[j] in exclude_Email:
inc_incout[j] = correction(inc_incout[j], email[j])
data.append([email[j], inc_incout[j],flag_incout[j],note[j], [NS[j], BI[j], TF[j], AM[j], DI[j], HI[j], FO[j], NP[j], MU[j]]])
indx = np.where(PGC[i] == pgc_incout_)
for j in indx[0]:
if email_[j] in emails and not email_[j] in exclude_Email:
                inc_incout_[j] = correction(inc_incout_[j], email_[j])
                data.append([email_[j], inc_incout_[j],flag_incout_[j],note_[j], [NS_[j], BI_[j], TF_[j], AM_[j], DI_[j], HI_[j], FO_[j], NP_[j], MU_[j]]])
incDict[PGC[i]] = data
return incDict
###########################################################
######################################
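### incMedian combines all inclination measurements of one galaxy into a
### single value: the boss's flag (Tully if present, else Kourkchi) decides
### whether the galaxy is rejected; otherwise the user-weighted,
### sigma-clipped median of the individual (bias-corrected) measurements is
### returned, together with an error floor that grows toward face-on systems.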
def incMedian(incDic):
boss = 'ekourkchi@gmail.com'
Keypeople = []
for item in incDic:
Keypeople.append(item[0])
if item[0] == 'rbtully1@gmail.com':
boss = 'rbtully1@gmail.com'
flag = 0
inc = 0
note = ''
stdev = 0
n = 0 # number of good measurments
concerns = np.zeros(9)
if boss in Keypeople:
poss_i = 0
for ppl in Keypeople:
if ppl==boss: break
poss_i+=1
if incDic[poss_i][2] != 0: # boss has flagged it
flag = 1
for item in incDic:
if item[2]==1:
note = addNote(note, item[3])
concerns+=np.asarray(item[4])
n+=1
else: # boss has NOT flagged it
flag = 0
incs = []
incs2 = []
for item in incDic:
if item[2]==0:
incs.append(item[1])
incs2 = inc_append(incs2, item[0], item[1])
note = addNote(note, item[3])
n+=1
incs = np.asarray(incs)
filtered_data = sigma_clip(incs, sigma=2, iters=5, copy=False)
incs = filtered_data.data[np.logical_not(filtered_data.mask)]
stdev = np.std(incs)
incs2 = np.asarray(incs2)
filtered_data = sigma_clip(incs2, sigma=2, iters=5, copy=False)
incs2 = filtered_data.data[np.logical_not(filtered_data.mask)]
inc = np.median(incs2)
else:
flag = []
for item in incDic:
flag.append(item[2])
flag = np.median(flag)
if flag > 0: flag =1
incs = []
incs2 = []
for item in incDic:
if item[2]==flag:
incs.append(item[1])
incs2 = inc_append(incs2, item[0], item[1])
note = addNote(note, item[3])
concerns+=np.asarray(item[4])
n+=1
incs = np.asarray(incs)
filtered_data = sigma_clip(incs, sigma=2, iters=5, copy=False)
incs = filtered_data.data[np.logical_not(filtered_data.mask)]
stdev = np.std(incs)
incs2 = np.asarray(incs2)
filtered_data = sigma_clip(incs2, sigma=2, iters=5, copy=False)
incs2 = filtered_data.data[np.logical_not(filtered_data.mask)]
inc = np.median(incs2)
note = addConcern(note, concerns)
inc = rnd_inc(inc)
if inc>=89:
err = 1.
elif inc>=85:
err = 2.
elif inc>=69:
err = 3.
elif inc>=50:
err = 4.
elif inc>=45:
err = 6.
else:
err = 0
flag = 1
inc = 0
stdev = 0
stdev = np.max([stdev, err])
stdev = np.round(stdev)
return inc, stdev, flag, note, n
#######################################
#################
def query_leda_lyon(pgc):
leda = []
    ### build the LEDA SQL query string for a single PGC number
    query = "%20pgc%3D" + str(pgc)
    url = 'http://leda.univ-lyon1.fr/leda/fullsqlmean.cgi?Query=select%20*%20where' + query
result=urllib2.urlopen(url)
for myline in result:
if "<" in myline:
continue
if myline=="":
continue
elements=myline.replace(" ","").split("|")
elements=[x if x!="-" else None for x in elements]
if ("pgc" in elements[0]):
continue
if (len(elements)<2):
continue
elements.pop()
if (elements):
#print elements[:3]
leda.append((elements))
pgc_leda = None
ra_leda = None
dec_leda = None
l_leda = None
b_leda = None
sgl_leda = None
sgb_leda = None
logd25_leda = None
logr25_leda = None
pa_leda = None
ty_leda = None
type_leda = None
Vhel_leda = None
if (leda):
leda = leda[0]
pgc_leda = int(leda[0])
ra_leda = float(leda[5])*15.
dec_leda = float(leda[6])
l_leda = float(leda[7])
b_leda = float(leda[8])
sgl_leda = float(leda[9])
sgb_leda = float(leda[10])
logd25_leda = float(leda[20])
logr25_leda = float(leda[22])
pa_leda = float(leda[24])
ty_leda = float(leda[17])
type_leda = (leda[12])
Vhel_leda = float(leda[52])
return([pgc_leda, ra_leda, dec_leda, l_leda, b_leda, sgl_leda, sgb_leda, logd25_leda, logr25_leda, pa_leda, ty_leda, type_leda, Vhel_leda])
#return leda
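### Example usage (a sketch; this queries the HyperLEDA server, so it needs
### network access and may be slow):
# pgc_l, ra_l, dec_l, l_l, b_l, sgl_l, sgb_l, logd25_l, logr25_l, pa_l, ty_l, type_l, vhel_l = query_leda_lyon(2557)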
###############################
########################################################### TEST
#print get_mag_wise('/run/media/ehsan/6ccd3c78-12e8-4f00-815d-faf200b314cf/ehsan/db_esn/cf4_wise/data/001D/photometry/NGC7821_w1_asymptotic.dat', index=1)
#print get_mag_f('/run/media/ehsan/6ccd3c78-12e8-4f00-815d-faf200b314cf/ehsan/db_esn/cf4_wise/data/001D/photometry/NGC7821_w1_asymptotic.dat', header='A_Gal:')
#print get_mag('/run/media/ehsan/6ccd3c78-12e8-4f00-815d-faf200b314cf/ehsan/db_esn/cf4_sdss/data/018D/photometry/pgc1264576_g_asymptotic.dat')
#print get_mag('/run/media/ehsan/6ccd3c78-12e8-4f00-815d-faf200b314cf/ehsan/db_esn/cf4_sdss/data/018D/photometry/pgc1264576_g_asymptotic.dat', index=1)
#print get_mag('/run/media/ehsan/6ccd3c78-12e8-4f00-815d-faf200b314cf/ehsan/db_esn/cf4_sdss/data/018D/photometry/pgc1264576_g_asymptotic.dat', header='A_Gal:')
#sys.exit()
########################################################### Begin
inFile = 'EDD_distance_cf4_v22.csv'
table = np.genfromtxt(inFile , delimiter='|', filling_values=None, names=True, dtype=None)
pgc = table['pgc']
ra = table['ra']
dec = table['dec']
gl = table['gl']
gb = table['gb']
sgl = table['sgl']
sgb = table['sgb']
d25 = table['d25']
b_a = table['b_a']
pa = table['pa']
ty = table['ty']
type_col = table['type']
sdss = table['sdss']
alfa100_ = table['alfa100']
QA_sdss = table['QA_sdss']
QA_wise = table['QA_wise']
############################################################
inFile = 'All_LEDA_EDD.csv'
table = np.genfromtxt( inFile , delimiter=',', filling_values=-1000000, names=True, dtype=None)
pgc_leda = table['pgc']
ra_leda = table['al2000']
ra_leda *= 15.
dec_leda = table['de2000']
l_leda = table['l2']
b_leda = table['b2']
sgl_leda = table['sgl']
sgb_leda = table['sgb']
logd25_leda = table['logd25']
d25_leda = 0.1*(10**logd25_leda)
logr25_leda = table['logr25']
b_a_leda = 1./(10**logr25_leda)
pa_leda = table['pa']
ty_leda = table['t']
type_leda = table['type']
LEDA_vhelio = table['v']
############################################################
#####################################################
inFile = 'Alfa100_EDD.csv'
table = np.genfromtxt( inFile , delimiter='|', filling_values=-1000000, names=True, dtype=None)
pgc_edd_alfa100 = table['PGC']
Vhel_alfa100 = table['Vhel']
inFile = 'tmp1'
table = np.genfromtxt( inFile , delimiter='|', filling_values=None, names=True, dtype=None)
pgc_sdss = table['pgc']
#####################################################
ADHI = np.genfromtxt('ADHI.csv' , delimiter=',', filling_values=-1000000, names=True, dtype=None )
ADHI_pgc = ADHI['PGC'] # ADHI
ADHI_vh = ADHI['Vh_av']
Cornel = np.genfromtxt('Cornel_HI.csv' , delimiter='|', filling_values=-1000000, names=True, dtype=None )
Cornel_pgc = Cornel['PGC'] # Cornell
Cornelvh = Cornel['Vhel']
TMRS = np.genfromtxt('2MRS_allsky.csv' , delimiter=',', filling_values=-1000000, names=True, dtype=None )
TMRS_pgc = TMRS['pgc'] # 2MRS
TMRS_vh = TMRS['Vhel']
TMPP = np.genfromtxt('2M++_allsky.csv' , delimiter=',', filling_values=-1000000, names=True, dtype=None )
TMPP_pgc = TMPP['pgc'] # 2M++
TMPP_vhell = TMPP['Vhel']
CF3D = np.genfromtxt('CF3D_allsky.csv' , delimiter=',', filling_values=-1000000, names=True, dtype=None )
CF3D_pgc = CF3D['pgc'] # CF3D
CF3D_vhell = CF3D['Vhel']
MKgr = np.genfromtxt('MKgroups_allsky.csv' , delimiter=',', filling_values=-1000000, names=True, dtype=None )
MKgr_pgc = MKgr['pgc'] # MKgroups
MKgr_Vlg = MKgr['Vlg']
Upda = np.genfromtxt('Updated_allsky.csv' , delimiter=',', filling_values=-1000000, names=True, dtype=None )
Upda_pgc = Upda['pgc'] # Updated
Upda_Vh = Upda['Vh']
KTg = np.genfromtxt('KTgroups.csv' , delimiter='|', filling_values=-1000000, names=True, dtype=None )
KTg_pgc = KTg['PGC'] # KTgroups
KTg_Vhel = KTg['Vhel']
KTg_Vls = KTg['Vls']
pgc_ = []
ra_ = []
dec_ = []
l_ = []
b_ = []
sgl_ = []
sgb_ = []
sdss_ = []
d25_ = []
alfa100 = []
QA = []
QA_wise = []
pa_ = []
b_a_ = []
ty_ = []
type_ = []
Vhel_ = []
Vls_ = []
for i in range(len(pgc)):
if not pgc[i] in pgc_:
pgc_.append(pgc[i])
ra_.append(ra[i])
dec_.append(dec[i])
l_.append(gl[i])
b_.append(gb[i])
sgl_.append(sgl[i])
sgb_.append(sgb[i])
d25_.append(d25[i])
sdss_.append(sdss[i])
pa_.append(pa[i])
b_a_.append(b_a[i])
ty_.append(ty[i])
alfa100.append(alfa100_[i])
if QA_SDSS_DONE(pgc[i], ra[i]):
QA.append(1)
else: QA.append(0)
if QA_WISE_DONE(pgc[i], ra[i]):
QA_wise.append(1)
else: QA_wise.append(0)
        type_.append(type_col[i])
added = False
if pgc[i] in KTg_pgc:
indices, = np.where(KTg_pgc==pgc[i])
if KTg_Vhel[indices[0]]!=0:
Vls_.append(KTg_Vls[indices[0]])
Vhel_.append(KTg_Vhel[indices[0]])
added = True
#print 'KTg: ',pgc[i],KTg_Vhel[indices[0]]
if not added and pgc[i] in CF3D_pgc:
indices, = np.where(CF3D_pgc==pgc[i])
if CF3D_vhell[indices[0]]!=-1000000:
Vls_.append(Vh2Vls(gl[i], gb[i], CF3D_vhell[indices[0]]))
Vhel_.append(CF3D_vhell[indices[0]])
added = True
#print 'cf3: ',pgc[i],CF3D_vhell[indices[0]]
if not added and pgc[i] in TMRS_pgc:
indices, = np.where(TMRS_pgc==pgc[i])
if TMRS_vh[indices[0]]!=-1000000:
Vls_.append(Vh2Vls(gl[i], gb[i], TMRS_vh[indices[0]]))
Vhel_.append(TMRS_vh[indices[0]])
added = True
#print '2mrs: ',pgc[i],TMRS_vh[indices[0]]
if not added and pgc[i] in TMPP_pgc:
indices, = np.where(TMPP_pgc==pgc[i])
if TMPP_vhell[indices[0]]!=-1000000:
Vls_.append(Vh2Vls(gl[i], gb[i], TMPP_vhell[indices[0]]))
Vhel_.append(TMPP_vhell[indices[0]])
added = True
#print '2m++: ',pgc[i],TMPP_vhell[indices[0]]
if not added and pgc[i] in ADHI_pgc:
indices, = np.where(ADHI_pgc==pgc[i])
if ADHI_vh[indices[0]]!=-1000000:
Vls_.append(Vh2Vls(gl[i], gb[i], ADHI_vh[indices[0]]))
Vhel_.append(ADHI_vh[indices[0]])
added = True
#print 'ADHI: ',pgc[i],ADHI_vh[indices[0]]
if not added and pgc[i] in pgc_edd_alfa100:
indices, = np.where(pgc_edd_alfa100==pgc[i])
if Vhel_alfa100[indices[0]]!=-1000000:
Vls_.append(Vh2Vls(gl[i], gb[i], Vhel_alfa100[indices[0]]))
Vhel_.append(Vhel_alfa100[indices[0]])
added = True
#print 'Alfa: ',pgc[i],Vhel_alfa100[indices[0]]
if not added and pgc[i] in Upda_pgc:
indices, = np.where(Upda_pgc==pgc[i])
if Upda_Vh[indices[0]]!=-1000000:
Vls_.append(Vh2Vls(gl[i], gb[i], Upda_Vh[indices[0]]))
Vhel_.append(Upda_Vh[indices[0]])
added = True
#print 'Upda: ',pgc[i],Upda_Vh[indices[0]]
if not added and pgc[i] in MKgr_pgc:
indices, = np.where(MKgr_pgc==pgc[i])
if MKgr_Vlg[indices[0]]!=-1000000:
Vhelio = Vlg2Vh(gl[i], gb[i], MKgr_Vlg[indices[0]])
Vls_.append(Vlg2Vls(gl[i], gb[i], MKgr_Vlg[indices[0]]))
Vhel_.append(Vhelio)
added = True
#print 'MKg: ',pgc[i],Vhelio
if not added and pgc[i] in Cornel_pgc:
indices, = np.where(Cornel_pgc==pgc[i])
if Cornelvh[indices[0]]!=-1000000:
Vls_.append(Vh2Vls(gl[i], gb[i], Cornelvh[indices[0]]))
Vhel_.append(Cornelvh[indices[0]])
added = True
#print 'ADHI: ',pgc[i],Cornelvh[indices[0]]
if not added and pgc[i] in pgc_leda:
indices, = np.where(pgc_leda==pgc[i])
if LEDA_vhelio[indices[0]]!=-1000000:
Vls_.append(Vh2Vls(gl[i], gb[i], LEDA_vhelio[indices[0]]))
Vhel_.append(LEDA_vhelio[indices[0]])
added = True
#print 'LEDA: ',pgc[i],LEDA_vhelio[indices[0]]
if not added:
try:
leda_q = query_leda_lyon(pgc[i])
Vhelio = leda_q[12]
Vls_.append(Vh2Vls(gl[i], gb[i], Vhelio))
Vhel_.append(Vhelio)
except:
print 'no Velocity for PGC: ', pgc[i]
Vls_.append(-1000000)
Vhel_.append(-1000000)
#sys.exit()
#####################################################
print "Adding Types from the LEDA catalog"
pgc_ = np.asarray(pgc_)
ra_ = np.asarray(ra_)
dec_ = np.asarray(dec_)
l_ = np.asarray(l_)
b_ = np.asarray(b_)
sgl_ = np.asarray(sgl_)
sgb_ = np.asarray(sgb_)
d25_ = np.asarray(d25_)
b_a_ = np.asarray(b_a_)
pa_ = np.asarray(pa_)
ty_ = np.asarray(ty_)
type_ = np.asarray(type_)
sdss_ = np.asarray(sdss_)
alfa100 = np.asarray(alfa100)
QA = np.asarray(QA)
QA_wise = np.asarray(QA_wise)
Vhel_ = np.asarray(Vhel_)
Vls_ = np.asarray(Vls_)
index = np.argsort(pgc_)
pgc_ = pgc_[index]
ra_ = ra_[index]
dec_ = dec_[index]
l_ = l_[index]
b_ = b_[index]
sgl_ = sgl_[index]
sgb_ = sgb_[index]
d25_ = d25_[index]
b_a_ = b_a_[index]
pa_ = pa_[index]
ty_ = ty_[index]
type_ = type_[index]
sdss_ = sdss_[index]
alfa100 = alfa100[index]
QA = QA[index]
QA_wise = QA_wise[index]
Vhel_ = Vhel_[index]
Vls_ = Vls_[index]
index, = np.where(Vhel_==-1000000)
Vhel_[index] = None
Vls_[index] = None
for i in range(len(pgc_)):
gal = pgc_[i]
if gal in [58411,58239,17170,1977897,9476]:
sdss_[i] = 0
#####################################################################
print "Taking Care of inclinations ..."
A_emails = ['rbtully1@gmail.com', 'mokelkea@hawaii.edu', 'jrl2014@hawaii.edu', 'dschoen@hawaii.edu', 'mi24@hawaii.edu', 'chuangj@hawaii.edu']
B_emails = ['ekourkchi@gmail.com', 's.eftekharzadeh@gmail.com', 'chasemu@hawaii.edu', 'adholtha@hawaii.edu', 'mka7@hawaii.edu', 'a.danesh61@gmail.com', 'helenecourtois33@gmail.com']
C_emails = ['cgrubner0@gmail.com', 'pascal.jouve@free.fr', 'dlsaintsorny@gmail.com', 'arnaud.ohet@gmail.com', 'hawaii@udrea.fr', 'henri140860@wanadoo.fr']
D_emails = ['henri140860@wanadoo.fr', 'claude.rene21@gmail.com', 'fredwallet@gmail.com', 'joannin.lycee@free.fr', 'bevig434@gmail.com', 'echarraix69@gmail.com']
incDic = getINC(include_Email=A_emails+B_emails+C_emails+D_emails)
print "Taking Care of flags ..."
location_sdss = '/home/ehsan/db_esn/cf4_sdss/data/'
location_wise = '/home/ehsan/db_esn/cf4_wise/data/'
N = len(pgc_)
Squality = np.zeros(N)
Wquality = np.zeros(N)
disturbed = np.zeros((N,), dtype='a1')
trail = np.zeros((N,), dtype='a1')
not_spiral = np.zeros((N,), dtype='a1')
face_on = np.zeros((N,), dtype='a1')
faint = np.zeros((N,), dtype='a1')
crowded = np.zeros((N,), dtype='a1')
over_masked = np.zeros((N,), dtype='a1')
fov = np.zeros((N,), dtype='a1')
multiple = np.zeros((N,), dtype='a1')
bright_star = np.zeros((N,), dtype='a1')
uncertain = np.zeros((N,), dtype='a1')
note = np.zeros((N,), dtype='a100')
source = np.zeros((N,), dtype='a4')
uu_mag = np.zeros((N,))
gg_mag = np.zeros((N,))
rr_mag = np.zeros((N,))
ii_mag = np.zeros((N,))
zz_mag = np.zeros((N,))
Sba = np.zeros((N,))
Spa = np.zeros((N,))
u_Rasy = np.zeros((N,))
g_Rasy = np.zeros((N,))
r_Rasy = np.zeros((N,))
i_Rasy = np.zeros((N,))
z_Rasy = np.zeros((N,))
A_u = np.zeros((N,))
A_g = np.zeros((N,))
A_r = np.zeros((N,))
A_i = np.zeros((N,))
A_z = np.zeros((N,))
ebv = np.zeros((N,))
mu0_u = np.zeros((N,))
mu0_g = np.zeros((N,))
mu0_r = np.zeros((N,))
mu0_i = np.zeros((N,))
mu0_z = np.zeros((N,))
mu50_u = np.zeros((N,))
mu50_g = np.zeros((N,))
mu50_r = np.zeros((N,))
mu50_i = np.zeros((N,))
mu50_z = np.zeros((N,))
mu90_u = np.zeros((N,))
mu90_g = np.zeros((N,))
mu90_r = np.zeros((N,))
mu90_i = np.zeros((N,))
mu90_z = np.zeros((N,))
m255_u = np.zeros((N,))
m255_g = np.zeros((N,))
m255_r = np.zeros((N,))
m255_i = np.zeros((N,))
m255_z = np.zeros((N,))
disc_mu0_u = np.zeros((N,))
disc_mu0_g = np.zeros((N,))
disc_mu0_r = np.zeros((N,))
disc_mu0_i = np.zeros((N,))
disc_mu0_z = np.zeros((N,))
SLh_u = np.zeros((N,))
SLh_g = np.zeros((N,))
SLh_r = np.zeros((N,))
SLh_i = np.zeros((N,))
SLh_z = np.zeros((N,))
R50_u = np.zeros((N,))
R50_g = np.zeros((N,))
R50_r = np.zeros((N,))
R50_i = np.zeros((N,))
R50_z = np.zeros((N,))
R90_u = np.zeros((N,))
R90_g = np.zeros((N,))
R90_r = np.zeros((N,))
R90_i = np.zeros((N,))
R90_z = np.zeros((N,))
R255_u = np.zeros((N,))
R255_g = np.zeros((N,))
R255_r = np.zeros((N,))
R255_i = np.zeros((N,))
R255_z = np.zeros((N,))
Cntion_u = np.zeros((N,))
Cntion_g = np.zeros((N,))
Cntion_r = np.zeros((N,))
Cntion_i = np.zeros((N,))
Cntion_z = np.zeros((N,))
d_m_ext_u = np.zeros((N,))
d_m_ext_g = np.zeros((N,))
d_m_ext_r = np.zeros((N,))
d_m_ext_i = np.zeros((N,))
d_m_ext_z = np.zeros((N,))
w1_mag = np.zeros((N,))
w2_mag = np.zeros((N,))
Wba = np.zeros((N,))
Wpa = np.zeros((N,))
w1_Rasy = np.zeros((N,))
w2_Rasy = np.zeros((N,))
A_w1 = np.zeros((N,))
A_w2 = np.zeros((N,))
mu0_w1 = np.zeros((N,))
mu50_w1 = np.zeros((N,))
mu90_w1 = np.zeros((N,))
m255_w1 = np.zeros((N,))
disc_mu0_w1 = np.zeros((N,))
SLh_w1 = np.zeros((N,))
R50_w1 = np.zeros((N,))
R90_w1 = np.zeros((N,))
R255_w1 = np.zeros((N,))
Cntion_w1 = np.zeros((N,))
mu0_w2 = np.zeros((N,))
mu50_w2 = np.zeros((N,))
mu90_w2 = np.zeros((N,))
m255_w2 = np.zeros((N,))
disc_mu0_w2 = np.zeros((N,))
SLh_w2 = np.zeros((N,))
R50_w2 = np.zeros((N,))
R90_w2 = np.zeros((N,))
R255_w2 = np.zeros((N,))
Cntion_w2 = np.zeros((N,))
d_m_ext_w1 = np.zeros((N,))
d_m_ext_w2 = np.zeros((N,))
inc = np.zeros((N,))
inc_e = np.zeros((N,))
inc_flg = np.zeros((N,))
inc_note = np.zeros((N,), dtype='a100')
inc_n = np.zeros((N,))
for i in range(len(pgc_)):
## inclination
if pgc_[i] in incDic:
inc[i], inc_e[i], inc_flg[i], inc_note[i], inc_n[i]= incMedian(incDic[pgc_[i]])
radb = ra_db(ra_[i])
pgcname = 'pgc'+str(pgc_[i])
qa_txt_sdss = location_sdss + radb + '/sdss/fits/' + pgcname+'_qa.txt'
photometry_sdss = location_sdss + radb +'/photometry/'+pgcname
##################################################################### Taking care of photometry results
if os.path.exists(qa_txt_sdss):
ebv[i] = -9.99
Squality[i] = get_quality(qa_txt_sdss)
if os.path.exists(photometry_sdss+'_u_asymptotic.dat'):
uu_mag[i] = get_mag(photometry_sdss+'_u_asymptotic.dat')
u_Rasy[i] = get_mag(photometry_sdss+'_u_asymptotic.dat', index=1)/60.
A_u[i] = get_mag(photometry_sdss+'_u_asymptotic.dat', header='A_Gal:')
if os.path.exists(photometry_sdss+'_u.scales.dat'):
ebv[i], A_u[i], mu0_u[i], mu50_u[i], mu90_u[i], m255_u[i], disc_mu0_u[i], SLh_u[i], R50_u[i], R90_u[i], R255_u[i], Cntion_u[i], d_m_ext_u[i] = get_scales(photometry_sdss+'_u.scales.dat')
if os.path.exists(photometry_sdss+'_g_asymptotic.dat'):
gg_mag[i] = get_mag(photometry_sdss+'_g_asymptotic.dat')
g_Rasy[i] = get_mag(photometry_sdss+'_g_asymptotic.dat', index=1)/60.
A_g[i] = get_mag(photometry_sdss+'_g_asymptotic.dat', header='A_Gal:')
if os.path.exists(photometry_sdss+'_g.scales.dat'):
ebv[i], A_g[i], mu0_g[i], mu50_g[i], mu90_g[i], m255_g[i], disc_mu0_g[i], SLh_g[i], R50_g[i], R90_g[i], R255_g[i], Cntion_g[i], d_m_ext_g[i] = get_scales(photometry_sdss+'_g.scales.dat')
if os.path.exists(photometry_sdss+'_r_asymptotic.dat'):
rr_mag[i] = get_mag(photometry_sdss+'_r_asymptotic.dat')
r_Rasy[i] = get_mag(photometry_sdss+'_r_asymptotic.dat', index=1)/60.
A_r[i] = get_mag(photometry_sdss+'_r_asymptotic.dat', header='A_Gal:')
if os.path.exists(photometry_sdss+'_r.scales.dat'):
ebv[i], A_r[i], mu0_r[i], mu50_r[i], mu90_r[i], m255_r[i], disc_mu0_r[i], SLh_r[i], R50_r[i], R90_r[i], R255_r[i], Cntion_r[i], d_m_ext_r[i] = get_scales(photometry_sdss+'_r.scales.dat')
if os.path.exists(photometry_sdss+'_i_asymptotic.dat'):
ii_mag[i] = get_mag(photometry_sdss+'_i_asymptotic.dat')
i_Rasy[i] = get_mag(photometry_sdss+'_i_asymptotic.dat', index=1)/60.
A_i[i] = get_mag(photometry_sdss+'_i_asymptotic.dat', header='A_Gal:')
if os.path.exists(photometry_sdss+'_i.scales.dat'):
ebv[i], A_i[i], mu0_i[i], mu50_i[i], mu90_i[i], m255_i[i], disc_mu0_i[i], SLh_i[i], R50_i[i], R90_i[i], R255_i[i], Cntion_i[i], d_m_ext_i[i] = get_scales(photometry_sdss+'_i.scales.dat')
if os.path.exists(photometry_sdss+'_z_asymptotic.dat'):
zz_mag[i] = get_mag(photometry_sdss+'_z_asymptotic.dat')
z_Rasy[i] = get_mag(photometry_sdss+'_z_asymptotic.dat', index=1)/60.
A_z[i] = get_mag(photometry_sdss+'_z_asymptotic.dat', header='A_Gal:')
if os.path.exists(photometry_sdss+'_z.scales.dat'):
ebv[i], A_z[i], mu0_z[i], mu50_z[i], mu90_z[i], m255_z[i], disc_mu0_z[i], SLh_z[i], R50_z[i], R90_z[i], R255_z[i], Cntion_z[i], d_m_ext_z[i] = get_scales(photometry_sdss+'_z.scales.dat')
ellipsefile = location_sdss + radb +'/photometry/'+pgcname+'_g_ellipsepar.dat'
if os.path.exists(ellipsefile):
ra_cen, dec_cen, semimajor, semiminor, PA = get_ellipse(ellipsefile)
Sba[i] = min([semimajor,semiminor])/max([semiminor,semimajor])
Spa[i] = PA
ellipsefile = location_sdss + radb +'/photometry/'+pgcname+'_r_ellipsepar.dat'
if os.path.exists(ellipsefile):
ra_cen, dec_cen, semimajor, semiminor, PA = get_ellipse(ellipsefile)
Sba[i] = min([semimajor,semiminor])/max([semiminor,semimajor])
Spa[i] = PA
ellipsefile = location_sdss + radb +'/photometry/'+pgcname+'_i_ellipsepar.dat'
if os.path.exists(ellipsefile):
ra_cen, dec_cen, semimajor, semiminor, PA = get_ellipse(ellipsefile)
Sba[i] = min([semimajor,semiminor])/max([semiminor,semimajor])
Spa[i] = PA
if pgc_[i] in wise_pgc:
i_lst = np.where(wise_pgc == pgc_[i])
galname = wise_name[i_lst][0]
qa_txt_wise = location_wise + radb + '/wise/fits/' + galname+'_qa.txt'
if not os.path.exists(qa_txt_wise):
galname = 'pgc'+str(pgc_[i])
else:
galname = 'pgc'+str(pgc_[i])
qa_txt_wise = location_wise + radb + '/wise/fits/' + galname+'_qa.txt'
photometry_wise = location_wise + radb +'/photometry/'+galname
if os.path.exists(qa_txt_wise):
tmp = -9.99
Wquality[i] = get_quality(qa_txt_wise)
if os.path.exists(photometry_wise+'_w1_asymptotic.dat'):
w1_mag[i] = get_mag_wise(photometry_wise+'_w1_asymptotic.dat')
w1_Rasy[i] = get_mag_wise(photometry_wise+'_w1_asymptotic.dat', index=1)/60.
A_w1[i] = get_mag_f(photometry_wise+'_w1_asymptotic.dat', header='A_Gal:')
if os.path.exists(photometry_wise+'_w1.scales.dat'):
tmp, A_w1[i], mu0_w1[i], mu50_w1[i], mu90_w1[i], m255_w1[i], disc_mu0_w1[i], SLh_w1[i], R50_w1[i], R90_w1[i], R255_w1[i], Cntion_w1[i], d_m_ext_w1[i] = get_scales(photometry_wise+'_w1.scales.dat')
if os.path.exists(photometry_wise+'_w2_asymptotic.dat'):
w2_mag[i] = get_mag_wise(photometry_wise+'_w2_asymptotic.dat')
w2_Rasy[i] = get_mag_wise(photometry_wise+'_w2_asymptotic.dat', index=1)/60.
A_w2[i] = get_mag_f(photometry_wise+'_w2_asymptotic.dat', header='A_Gal:')
if os.path.exists(photometry_wise+'_w2.scales.dat'):
tmp, A_w2[i], mu0_w2[i], mu50_w2[i], mu90_w2[i], m255_w2[i], disc_mu0_w2[i], SLh_w2[i], R50_w2[i], R90_w2[i], R255_w2[i], Cntion_w2[i], d_m_ext_w2[i] = get_scales(photometry_wise+'_w2.scales.dat')
if ebv[i]<0 and tmp>0:
ebv[i] = tmp
ellipsefile = location_wise + radb +'/photometry/'+galname+'_w2_ellipsepar.dat'
if os.path.exists(ellipsefile):
ra_cen, dec_cen, semimajor, semiminor, PA = get_ellipse_wise(ellipsefile)
Wba[i] = min([semimajor,semiminor])/max([semiminor,semimajor])
Wpa[i] = PA
ellipsefile = location_wise + radb +'/photometry/'+galname+'_w1_ellipsepar.dat'
if os.path.exists(ellipsefile):
ra_cen, dec_cen, semimajor, semiminor, PA = get_ellipse_wise(ellipsefile)
Wba[i] = min([semimajor,semiminor])/max([semiminor,semimajor])
Wpa[i] = PA
##################################################################### Taking care of flags
found = False
if os.path.exists(qa_txt_sdss):
qa_txt = qa_txt_sdss
found = True
source[i] = 'SDSS'
else:
if QA_wise[i]==1:
if pgc_[i] in wise_pgc:
i_lst = np.where(wise_pgc == pgc_[i])
galname = wise_name[i_lst][0]
qa_txt_wise = location_wise + radb + '/wise/fits/' + galname+'_qa.txt'
if not os.path.exists(qa_txt_wise):
galname = 'pgc'+str(pgc_[i])
else:
galname = 'pgc'+str(pgc_[i])
qa_txt_wise = location_wise + radb + '/wise/fits/' + galname+'_qa.txt'
if os.path.exists(qa_txt_wise):
qa_txt = qa_txt_wise
found = True
source[i] = 'WISE'
else:
#print galname
#print galname, ra[i], dec[i], d25[i], d25[i]*b_a[i], PA[i], Ty[i]
source[i] = 'NONE'
if found:
if get_quality(qa_txt, nline=41)==1: disturbed[i]='D'
if get_quality(qa_txt, nline=42)==1: trail[i]='L'
if get_quality(qa_txt, nline=43)==1: not_spiral[i]='P'
if get_quality(qa_txt, nline=44)==1: face_on[i]='F'
if get_quality(qa_txt, nline=45)==1: faint[i]='N'
if get_quality(qa_txt, nline=46)==1: crowded[i]='C'
if get_quality(qa_txt, nline=47)==1: over_masked[i]='O'
if get_quality(qa_txt, nline=20)==1: fov[i]='V'
if get_quality(qa_txt, nline=19)==1: multiple[i]='M'
if get_quality(qa_txt, nline=18)==1: bright_star[i]='B'
if get_quality(qa_txt, nline=17)==1: uncertain[i]='U'
note[i]= read_note(qa_txt)
#####################################################################
myTable = Table()
myTable.add_column(Column(data=pgc_, name='pgc'))
myTable.add_column(Column(data=ra_, name='ra', format='%0.4f'))
myTable.add_column(Column(data=dec_, name='dec', format='%0.4f'))
myTable.add_column(Column(data=l_, name='gl', format='%0.4f'))
myTable.add_column(Column(data=b_, name='gb', format='%0.4f'))
myTable.add_column(Column(data=sgl_, name='sgl', format='%0.4f'))
myTable.add_column(Column(data=sgb_, name='sgb', format='%0.4f'))
myTable.add_column(Column(data=Vhel_, name='Vhel', format='%0.1f'))
myTable.add_column(Column(data=Vls_, name='Vls', format='%0.1f'))
myTable.add_column(Column(data=d25_, name='d25', format='%0.2f'))
myTable.add_column(Column(data=b_a_, name='b_a', format='%0.2f'))
myTable.add_column(Column(data=pa_, name='pa', format='%0.1f'))
myTable.add_column(Column(data=ty_, name='ty', format='%0.1f'))
myTable.add_column(Column(data=type_, name='type'))
myTable.add_column(Column(data=sdss_, name='sdss'))
myTable.add_column(Column(data=alfa100, name='alfa100'))
myTable.add_column(Column(data=QA, name='QA_sdss'))
myTable.add_column(Column(data=QA_wise, name='QA_wise'))
myTable.add_column(Column(data=ebv, name='ebv', format='%0.3f'))
myTable.add_column(Column(data=uu_mag, name='u_mag', format='%0.2f'))
myTable.add_column(Column(data=gg_mag, name='g_mag', format='%0.2f'))
myTable.add_column(Column(data=rr_mag, name='r_mag', format='%0.2f'))
myTable.add_column(Column(data=ii_mag, name='i_mag', format='%0.2f'))
myTable.add_column(Column(data=zz_mag, name='z_mag', format='%0.2f'))
myTable.add_column(Column(data=u_Rasy, name='u_Rasy', format='%0.2f'))
myTable.add_column(Column(data=g_Rasy, name='g_Rasy', format='%0.2f'))
myTable.add_column(Column(data=r_Rasy, name='r_Rasy', format='%0.2f'))
myTable.add_column(Column(data=i_Rasy, name='i_Rasy', format='%0.2f'))
myTable.add_column(Column(data=z_Rasy, name='z_Rasy', format='%0.2f'))
myTable.add_column(Column(data=A_u, name='A_u', format='%0.3f'))
myTable.add_column(Column(data=A_g, name='A_g', format='%0.3f'))
myTable.add_column(Column(data=A_r, name='A_r', format='%0.3f'))
myTable.add_column(Column(data=A_i, name='A_i', format='%0.3f'))
myTable.add_column(Column(data=A_z, name='A_z', format='%0.3f'))
myTable.add_column(Column(data=Sba, name='Sba', format='%0.2f'))
myTable.add_column(Column(data=Spa, name='Spa', format='%0.2f'))
myTable.add_column(Column(data=mu0_u, name='mu0_u', format='%0.2f'))
myTable.add_column(Column(data=mu0_g, name='mu0_g', format='%0.2f'))
myTable.add_column(Column(data=mu0_r, name='mu0_r', format='%0.2f'))
myTable.add_column(Column(data=mu0_i, name='mu0_i', format='%0.2f'))
myTable.add_column(Column(data=mu0_z, name='mu0_z', format='%0.2f'))
myTable.add_column(Column(data=mu50_u, name='mu50_u', format='%0.2f'))
myTable.add_column(Column(data=mu50_g, name='mu50_g', format='%0.2f'))
myTable.add_column(Column(data=mu50_r, name='mu50_r', format='%0.2f'))
myTable.add_column(Column(data=mu50_i, name='mu50_i', format='%0.2f'))
myTable.add_column(Column(data=mu50_z, name='mu50_z', format='%0.2f'))
myTable.add_column(Column(data=mu90_u, name='mu90_u', format='%0.2f'))
myTable.add_column(Column(data=mu90_g, name='mu90_g', format='%0.2f'))
myTable.add_column(Column(data=mu90_r, name='mu90_r', format='%0.2f'))
myTable.add_column(Column(data=mu90_i, name='mu90_i', format='%0.2f'))
myTable.add_column(Column(data=mu90_z, name='mu90_z', format='%0.2f'))
myTable.add_column(Column(data=m255_u, name='m255_u', format='%0.2f'))
myTable.add_column(Column(data=m255_g, name='m255_g', format='%0.2f'))
myTable.add_column(Column(data=m255_r, name='m255_r', format='%0.2f'))
myTable.add_column(Column(data=m255_i, name='m255_i', format='%0.2f'))
myTable.add_column(Column(data=m255_z, name='m255_z', format='%0.2f'))
myTable.add_column(Column(data=disc_mu0_u, name='disc_mu0_u', format='%0.2f'))
myTable.add_column(Column(data=disc_mu0_g, name='disc_mu0_g', format='%0.2f'))
myTable.add_column(Column(data=disc_mu0_r, name='disc_mu0_r', format='%0.2f'))
myTable.add_column(Column(data=disc_mu0_i, name='disc_mu0_i', format='%0.2f'))
myTable.add_column(Column(data=disc_mu0_z, name='disc_mu0_z', format='%0.2f'))
myTable.add_column(Column(data=SLh_u, name='h_u', format='%0.2f'))
myTable.add_column(Column(data=SLh_g, name='h_g', format='%0.2f'))
myTable.add_column(Column(data=SLh_r, name='h_r', format='%0.2f'))
myTable.add_column(Column(data=SLh_i, name='h_i', format='%0.2f'))
myTable.add_column(Column(data=SLh_z, name='h_z', format='%0.2f'))
myTable.add_column(Column(data=R50_u, name='R50_u', format='%0.2f'))
myTable.add_column(Column(data=R50_g, name='R50_g', format='%0.2f'))
myTable.add_column(Column(data=R50_r, name='R50_r', format='%0.2f'))
myTable.add_column(Column(data=R50_i, name='R50_i', format='%0.2f'))
myTable.add_column(Column(data=R50_z, name='R50_z', format='%0.2f'))
myTable.add_column(Column(data=R90_u, name='R90_u', format='%0.2f'))
myTable.add_column(Column(data=R90_g, name='R90_g', format='%0.2f'))
myTable.add_column(Column(data=R90_r, name='R90_r', format='%0.2f'))
myTable.add_column(Column(data=R90_i, name='R90_i', format='%0.2f'))
myTable.add_column(Column(data=R90_z, name='R90_z', format='%0.2f'))
myTable.add_column(Column(data=R255_u, name='R255_u', format='%0.2f'))
myTable.add_column(Column(data=R255_g, name='R255_g', format='%0.2f'))
myTable.add_column(Column(data=R255_r, name='R255_r', format='%0.2f'))
myTable.add_column(Column(data=R255_i, name='R255_i', format='%0.2f'))
myTable.add_column(Column(data=R255_z, name='R255_z', format='%0.2f'))
myTable.add_column(Column(data=Cntion_u, name='C82_u', format='%0.2f'))
myTable.add_column(Column(data=Cntion_g, name='C82_g', format='%0.2f'))
myTable.add_column(Column(data=Cntion_r, name='C82_r', format='%0.2f'))
myTable.add_column(Column(data=Cntion_i, name='C82_i', format='%0.2f'))
myTable.add_column(Column(data=Cntion_z, name='C82_z', format='%0.2f'))
myTable.add_column(Column(data=d_m_ext_u, name='d_m_ext_u', format='%0.4f'))
myTable.add_column(Column(data=d_m_ext_g, name='d_m_ext_g', format='%0.4f'))
myTable.add_column(Column(data=d_m_ext_r, name='d_m_ext_r', format='%0.4f'))
myTable.add_column(Column(data=d_m_ext_i, name='d_m_ext_i', format='%0.4f'))
myTable.add_column(Column(data=d_m_ext_z, name='d_m_ext_z', format='%0.4f'))
myTable.add_column(Column(data=w1_mag, name='w1_mag', format='%0.2f'))
myTable.add_column(Column(data=w2_mag, name='w2_mag', format='%0.2f'))
myTable.add_column(Column(data=w1_Rasy, name='w1_Rasy', format='%0.2f'))
myTable.add_column(Column(data=w2_Rasy, name='w2_Rasy', format='%0.2f'))
myTable.add_column(Column(data=A_w1, name='A_w1', format='%0.3f'))
myTable.add_column(Column(data=A_w2, name='A_w2', format='%0.3f'))
myTable.add_column(Column(data=Wba, name='Wba', format='%0.2f'))
myTable.add_column(Column(data=Wpa, name='Wpa', format='%0.2f'))
myTable.add_column(Column(data=mu0_w1, name='mu0_w1', format='%0.2f'))
myTable.add_column(Column(data=mu0_w2, name='mu0_w2', format='%0.2f'))
myTable.add_column(Column(data=mu50_w1, name='mu50_w1', format='%0.2f'))
myTable.add_column(Column(data=mu50_w2, name='mu50_w2', format='%0.2f'))
myTable.add_column(Column(data=mu90_w1, name='mu90_w1', format='%0.2f'))
myTable.add_column(Column(data=mu90_w2, name='mu90_w2', format='%0.2f'))
myTable.add_column(Column(data=m255_w1, name='m255_w1', format='%0.2f'))
myTable.add_column(Column(data=m255_w2, name='m255_w2', format='%0.2f'))
myTable.add_column(Column(data=disc_mu0_w1, name='disc_mu0_w1', format='%0.2f'))
myTable.add_column(Column(data=disc_mu0_w2, name='disc_mu0_w2', format='%0.2f'))
myTable.add_column(Column(data=SLh_w1, name='h_w1', format='%0.2f'))
myTable.add_column(Column(data=SLh_w2, name='h_w2', format='%0.2f'))
myTable.add_column(Column(data=R50_w1, name='R50_w1', format='%0.2f'))
myTable.add_column(Column(data=R50_w2, name='R50_w2', format='%0.2f'))
myTable.add_column(Column(data=R90_w1, name='R90_w1', format='%0.2f'))
myTable.add_column(Column(data=R90_w2, name='R90_w2', format='%0.2f'))
myTable.add_column(Column(data=R255_w1, name='R255_w1', format='%0.2f'))
myTable.add_column(Column(data=R255_w2, name='R255_w2', format='%0.2f'))
myTable.add_column(Column(data=Cntion_w1, name='C82_w1', format='%0.2f'))
myTable.add_column(Column(data=Cntion_w2, name='C82_w2', format='%0.2f'))
myTable.add_column(Column(data=d_m_ext_w1, name='d_m_ext_w1', format='%0.4f'))
myTable.add_column(Column(data=d_m_ext_w2, name='d_m_ext_w2', format='%0.4f'))
myTable.add_column(Column(data=Squality, name='Sqlt', dtype=np.dtype(int)))
myTable.add_column(Column(data=Wquality, name='Wqlt', dtype=np.dtype(int)))
#myTable.add_column(Column(data=source, name='source', dtype='S4'))
myTable.add_column(Column(data=disturbed, name='dst', dtype='S1'))
myTable.add_column(Column(data=trail, name='trl', dtype='S1'))
myTable.add_column(Column(data=not_spiral, name='nsp', dtype='S1'))
myTable.add_column(Column(data=face_on, name='fon', dtype='S1'))
myTable.add_column(Column(data=faint, name='fnt', dtype='S1'))
myTable.add_column(Column(data=crowded, name='cwd', dtype='S1'))
myTable.add_column(Column(data=over_masked, name='ovm', dtype='S1'))
myTable.add_column(Column(data=fov, name='fov', dtype='S1'))
myTable.add_column(Column(data=multiple, name='mlp', dtype='S1'))
myTable.add_column(Column(data=bright_star, name='bts', dtype='S1'))
myTable.add_column(Column(data=uncertain, name='unc', dtype='S1'))
myTable.add_column(Column(data=note, name='note', dtype='S100'))
myTable.add_column(Column(data=inc, name='inc', dtype=np.dtype(int)))
myTable.add_column(Column(data=inc_e, name='inc_e', dtype=np.dtype(int)))
myTable.add_column(Column(data=inc_flg, name='inc_flg', format='%1d'))
myTable.add_column(Column(data=inc_n, name='inc_n', format='%2d'))
myTable.add_column(Column(data=inc_note, name='inc_note', dtype='S100'))
myTable.write('EDD_distance_cf4_v23.csv', format='ascii.fixed_width',delimiter='|', bookend=False, overwrite=True)
| [
"ekourkchi@gmail.com"
] | ekourkchi@gmail.com |
2cecbfd5cadee6e7d888a5e28743114b68e47a97 | e1e5ffef1eeadd886651c7eaa814f7da1d2ade0a | /Systest/tests/acl/ACL_FUN_002.py | 460f887024066ecc12d883efdf8e0848b3fe6b7c | [] | no_license | muttu2244/MyPython | 1ddf1958e5a3514f9605d1f83c0930b24b856391 | 984ca763feae49a44c271342dbc15fde935174cf | refs/heads/master | 2021-06-09T02:21:09.801103 | 2017-10-10T07:30:04 | 2017-10-10T07:30:04 | 13,803,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,009 | py | #!/usr/bin/env python2.5
"""
#######################################################################
#
# Copyright (c) Stoke, Inc.
# All Rights Reserved.
#
# This code is confidential and proprietary to Stoke, Inc. and may only
# be used under a license from Stoke.
#
#######################################################################
DESCRIPTION: Verify that the SSX permits/drops packets based on the Precedence option. Repeat the case for inbound and outbound filters.
TEST PLAN :ACL Test plans
TEST CASES:ACL_FUN_002
TOPOLOGY DIAGRAM:
---------------------------------------------
| SSX
| TransIP = 2.2.2.45/24 |
|
| Port 2/1 |
--------------------------------------------
HOW TO RUN: python2.5 ACL_FUN_002.py
AUTHOR: rajshekar@primesoftsolutionsinc.com
REVIEWER:suresh@primesoftsolutionsinc.com
"""
import sys, os
mydir = os.path.dirname(__file__)
qa_lib_dir = os.path.join(mydir, "../../lib/py")
if qa_lib_dir not in sys.path:
sys.path.insert(1,qa_lib_dir)
#Import Frame-work libraries
from SSX import *
from Linux import *
from log import *
from StokeTest import test_case, test_suite, test_runner
from log import buildLogger
from logging import getLogger
from acl import *
from helpers import is_healthy
from misc import *
#import config and topo file
from config import *
from topo import *
class test_ACL_FUN_002(test_case):
myLog = getLogger()
def setUp(self):
#Establish a telnet session to the SSX box.
self.ssx = SSX(ssx["ip_addr"])
self.linux=Linux(linux['ip_addr'],linux['user_name'],linux['password'])
self.ssx.telnet()
self.linux.telnet()
# Clear the SSX config
self.ssx.clear_config()
# wait for card to come up
self.ssx.wait4cards()
self.ssx.clear_health_stats()
def tearDown(self):
# Close the telnet session of SSX
self.ssx.close()
self.linux.close()
def test_ACL_FUN_002(self):
# Push SSX config
self.ssx.config_from_string(script_var['ACL_FUN_002'])
        #change to the test context and clear IP counters
self.ssx.cmd("context %s" %(script_var['context_name']))
self.ssx.cmd("clear ip counters")
time.sleep(5)
        #Send ICMP packets with QoS byte 0x10, which should pass through the "precedence 0 tos 4" filter
self.linux.cmd("ping %s -c 5 -Q 0x10 "%(script_var['ssx_phy_iface1_ip']))
time.sleep(5)
#self.ssx.cmd("show ip counters icmp")
output=ip_verify_ip_counters_icmp(self.ssx,total_tx='5',total='0',echo_request='0', echo_reply='5', unreachable='0', \
mask_request='0', mask_reply='0', source_quench= '0' , param_problem='0', timestamp='0',\
redirects='0', info_reply='0', ttl_expired='0', other='0')
print output
self.failIfEqual(output,0,"Packet Filtering Unsuccessful")
self.ssx.cmd("clear ip counters")
time.sleep(5)
        #Send ICMP packets with QoS byte 0x30, which the "precedence 0 tos 4" filter should drop
self.linux.cmd("ping %s -c 5 -Q 0x30 "%(script_var['ssx_phy_iface1_ip']),timeout=40)
time.sleep(5)
output=ip_verify_ip_counters_icmp(self.ssx,total_tx='0',total='0',echo_request='0', echo_reply='0', unreachable='0', \
mask_request='0', mask_reply='0', source_quench= '0' , param_problem='0', timestamp='0',\
redirects='0', info_reply='0', ttl_expired='0', other='0')
print output
self.failIfEqual(output,0,"Packet Filtering Unsuccessful")
# Checking SSX Health
hs = self.ssx.get_health_stats()
        self.failUnless(is_healthy(hs), "Platform is not healthy")
if __name__ == '__main__':
filename = os.path.split(__file__)[1].replace('.py','.log')
log = buildLogger(filename, debug=True, console=True)
suite = test_suite()
suite.addTest(test_ACL_FUN_002)
test_runner(stream = sys.stdout).run(suite)
| [
"muttu2244@yahoo.com"
] | muttu2244@yahoo.com |
690865583f8714d226cb125c33e6c67608cebd3c | 87695989bbafe0ec6892cb4d1bb1965c84d6b35f | /simulation_research/next_day_wildfire_spread/export_ee_data.py | ceed9170ab0f6de90140410dc18d7d3ba57728b9 | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | guangyusong/google-research | ef6e85e7de75bd1289575374accc7fe19af896c7 | cac4a7f3b82ab629e25fa8afe33ce80cc6933e54 | refs/heads/master | 2022-01-01T11:22:31.825772 | 2021-12-30T20:08:40 | 2021-12-30T20:19:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,482 | py | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Earth Engine helper functions.
Details on the Earth Engine Data Catalog can be found here:
https://developers.google.com/earth-engine/datasets
In order to use this library you need to authenticate and initialize the
Earth Engine library.
"""
from typing import List, Text, Tuple
import ee
from simulation_research.next_day_wildfire_spread import ee_utils
def _get_all_feature_bands():
"""Returns list of all bands corresponding to features."""
return (ee_utils.DATA_BANDS[ee_utils.DataType.ELEVATION_SRTM] +
ee_utils.DATA_BANDS[ee_utils.DataType.POPULATION] +
ee_utils.DATA_BANDS[ee_utils.DataType.DROUGHT_GRIDMET] +
ee_utils.DATA_BANDS[ee_utils.DataType.VEGETATION_VIIRS] +
ee_utils.DATA_BANDS[ee_utils.DataType.WEATHER_GRIDMET])
def _get_all_response_bands():
"""Returns list of all bands corresponding to labels."""
return ee_utils.DATA_BANDS[ee_utils.DataType.FIRE_MODIS]
def _add_index(i, bands):
"""Appends the index number `i` at the end of each element of `bands`."""
return [f'{band}_{i}' for band in bands]
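# For example (hypothetical band names): _add_index(1, ['NDVI', 'pr'])
# returns ['NDVI_1', 'pr_1'].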
def _get_all_image_collections():
"""Gets all the image collections and corresponding time sampling."""
image_collections = {
'drought':
ee_utils.get_image_collection(ee_utils.DataType.DROUGHT_GRIDMET),
'vegetation':
ee_utils.get_image_collection(ee_utils.DataType.VEGETATION_VIIRS),
'weather':
ee_utils.get_image_collection(ee_utils.DataType.WEATHER_GRIDMET),
'fire':
ee_utils.get_image_collection(ee_utils.DataType.FIRE_MODIS),
}
time_sampling = {
'drought':
ee_utils.DATA_TIME_SAMPLING[ee_utils.DataType.DROUGHT_GRIDMET],
'vegetation':
ee_utils.DATA_TIME_SAMPLING[ee_utils.DataType.VEGETATION_VIIRS],
'weather':
ee_utils.DATA_TIME_SAMPLING[ee_utils.DataType.WEATHER_GRIDMET],
'fire':
ee_utils.DATA_TIME_SAMPLING[ee_utils.DataType.FIRE_MODIS],
}
return image_collections, time_sampling
def _verify_feature_collection(
feature_collection
):
"""Verifies the feature collection is valid.
If the feature collection is invalid, resets the feature collection.
Args:
feature_collection: An EE feature collection.
Returns:
`(feature_collection, size)` a tuple of the verified feature collection and
its size.
"""
try:
size = int(feature_collection.size().getInfo())
except ee.EEException:
# Reset the feature collection
feature_collection = ee.FeatureCollection([])
size = 0
return feature_collection, size
def _get_time_slices(
window_start,
window,
projection, # Defer calling until called by test code
resampling_scale,
lag = 1,
):
"""Extracts the time slice features.
Args:
window_start: Start of the time window over which to extract data.
window: Length of the window (in days).
projection: projection to reproject all data into.
resampling_scale: length scale to resample data to.
lag: Number of days before the fire to extract the features.
Returns:
A list of the extracted EE images.
"""
image_collections, time_sampling = _get_all_image_collections()
window_end = window_start.advance(window, 'day')
drought = image_collections['drought'].filterDate(
window_start.advance(-lag - time_sampling['drought'], 'day'),
window_start.advance(
-lag, 'day')).median().reproject(projection).resample('bicubic')
vegetation = image_collections['vegetation'].filterDate(
window_start.advance(-lag - time_sampling['vegetation'], 'day'),
window_start.advance(
-lag, 'day')).median().reproject(projection).resample('bicubic')
weather = image_collections['weather'].filterDate(
window_start.advance(-lag - time_sampling['weather'], 'day'),
window_start.advance(-lag, 'day')).median().reproject(
projection.atScale(resampling_scale)).resample('bicubic')
fire = image_collections['fire'].filterDate(window_start, window_end).map(
ee_utils.remove_mask).max()
detection = fire.clamp(6, 7).subtract(6).rename('detection')
return [drought, vegetation, weather, fire, detection]
def _export_dataset(
bucket,
folder,
prefix,
start_date,
start_days,
geometry,
kernel_size,
sampling_scale,
num_samples_per_file,
):
"""Exports the dataset TFRecord files for wildfire risk assessment.
Args:
bucket: Google Cloud bucket
folder: Folder to which to export the TFRecords.
prefix: Export file name prefix.
start_date: Start date for the EE data to export.
start_days: Start day of each time chunk to export.
geometry: EE geometry from which to export the data.
kernel_size: Size of the exported tiles (square).
sampling_scale: Resolution at which to export the data (in meters).
num_samples_per_file: Approximate number of samples to save per TFRecord
file.
"""
def _verify_and_export_feature_collection(
num_samples_per_export,
feature_collection,
file_count,
features,
):
"""Wraps the verification and export of the feature collection.
Verifies the size of the feature collection and triggers the export when
it is larger than `num_samples_per_export`. Resets the feature collection
and increments the file count at each export.
Args:
num_samples_per_export: Approximate number of samples per export.
feature_collection: The EE feature collection to export.
file_count: The TFRecord file count for naming the files.
features: Names of the features to export.
Returns:
`(feature_collection, file_count)` tuple of the current feature collection
and file count.
"""
feature_collection, size_count = _verify_feature_collection(
feature_collection)
if size_count > num_samples_per_export:
ee_utils.export_feature_collection(
feature_collection,
description=prefix + '_{:03d}'.format(file_count),
bucket=bucket,
folder=folder,
bands=features,
)
file_count += 1
feature_collection = ee.FeatureCollection([])
return feature_collection, file_count
elevation = ee_utils.get_image(ee_utils.DataType.ELEVATION_SRTM)
end_date = start_date.advance(max(start_days), 'days')
population = ee_utils.get_image_collection(ee_utils.DataType.POPULATION)
# Could also move to using the most recent population data for a given sample,
# which requires more EE logic.
population = population.filterDate(start_date, end_date).median()
projection = ee_utils.get_image_collection(ee_utils.DataType.WEATHER_GRIDMET)
projection = projection.first().select(
ee_utils.DATA_BANDS[ee_utils.DataType.WEATHER_GRIDMET][0]).projection()
resampling_scale = (
ee_utils.RESAMPLING_SCALE[ee_utils.DataType.WEATHER_GRIDMET])
all_days = []
for day in start_days:
for i in range(7):
all_days.append(day + i)
window = 1
sampling_limit_per_call = 60
features = _get_all_feature_bands() + _get_all_response_bands()
file_count = 0
feature_collection = ee.FeatureCollection([])
for start_day in all_days:
window_start = start_date.advance(start_day, 'days')
time_slices = _get_time_slices(window_start, window, projection,
resampling_scale)
image_list = [elevation, population] + time_slices[:-1]
detection = time_slices[-1]
arrays = ee_utils.convert_features_to_arrays(image_list, kernel_size)
to_sample = detection.addBands(arrays)
fire_count = ee_utils.get_detection_count(
detection,
geometry=geometry,
sampling_scale=10 * sampling_scale,
)
if fire_count > 0:
samples = ee_utils.extract_samples(
to_sample,
detection_count=fire_count,
geometry=geometry,
sampling_ratio=0, # Only extracting examples with fire.
sampling_limit_per_call=sampling_limit_per_call,
resolution=sampling_scale,
)
feature_collection = feature_collection.merge(samples)
feature_collection, file_count = _verify_and_export_feature_collection(
num_samples_per_file, feature_collection, file_count, features)
# Export the remaining feature collection
_verify_and_export_feature_collection(0, feature_collection, file_count,
features)
def export_ml_datasets(
bucket,
folder,
start_date,
end_date,
prefix = '',
kernel_size = 128,
sampling_scale = 1000,
eval_split_ratio = 0.125,
num_samples_per_file = 1000,
):
"""Exports the ML dataset TFRecord files for wildfire risk assessment.
Export is to Google Cloud Storage.
Args:
bucket: Google Cloud bucket
folder: Folder to which to export the TFRecords.
start_date: Start date for the EE data to export.
end_date: End date for the EE data to export.
prefix: File name prefix to use.
kernel_size: Size of the exported tiles (square).
sampling_scale: Resolution at which to export the data (in meters).
eval_split_ratio: Split ratio for the divide between training and evaluation
datasets.
num_samples_per_file: Approximate number of samples to save per TFRecord
file.
"""
split_days = ee_utils.split_days_into_train_eval_test(
start_date, end_date, split_ratio=eval_split_ratio, window_length_days=8)
for mode in ['train', 'eval', 'test']:
sub_prefix = f'{mode}_{prefix}'
_export_dataset(
bucket=bucket,
folder=folder,
prefix=sub_prefix,
start_date=start_date,
start_days=split_days[mode],
geometry=ee.Geometry.Rectangle(ee_utils.COORDINATES['US']),
kernel_size=kernel_size,
sampling_scale=sampling_scale,
num_samples_per_file=num_samples_per_file)
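# Example driver (added for illustration; the bucket and folder values are
# placeholders, and this assumes Earth Engine has already been initialised
# elsewhere):
# export_ml_datasets(
#     bucket='my-gcs-bucket',
#     folder='wildfire_tfrecords',
#     start_date=ee.Date('2020-01-01'),
#     end_date=ee.Date('2020-12-31'),
#     prefix='us_fires',
# )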
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
df67d820d16d861532c29149ce9e341f4afbc2f8 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/192c0dc9d16458255102ce13d1707d8285defe12-<_construct_url_3>-bug.py | bc88419c2462fff4e03343f512cece7788534609 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,779 | py | def _construct_url_3(self, root, parent, obj, child_includes):
    """This method is used by get_url when the object is the third-level class."""
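    # Illustrative sketch (added; the rn values below are hypothetical): for
    # state != 'query' with root_rn='tn-prod', parent_rn='ap-web',
    # obj_rn='epg-db' and empty child_includes, this method returns
    #   ('api/mo/uni/tn-prod/ap-web/epg-db.json', '?rsp-prop-include=config-only')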
root_rn = root['aci_rn']
root_obj = root['module_object']
parent_class = parent['aci_class']
parent_rn = parent['aci_rn']
parent_filter = parent['filter_target']
parent_obj = parent['module_object']
obj_class = obj['aci_class']
obj_rn = obj['aci_rn']
obj_filter = obj['filter_target']
mo = obj['module_object']
if (not child_includes):
self_child_includes = ('&rsp-subtree=full&rsp-subtree-class=' + obj_class)
else:
self_child_includes = '{0},{1}'.format(child_includes, obj_class)
if (not child_includes):
parent_self_child_includes = '&rsp-subtree=full&rsp-subtree-class={0},{1}'.format(parent_class, obj_class)
else:
parent_self_child_includes = '{0},{1},{2}'.format(child_includes, parent_class, obj_class)
if (self.module.params['state'] != 'query'):
path = 'api/mo/uni/{0}/{1}/{2}.json'.format(root_rn, parent_rn, obj_rn)
filter_string = ('?rsp-prop-include=config-only' + child_includes)
elif ((mo is None) and (parent_obj is None) and (root_obj is None)):
path = 'api/class/{0}.json'.format(obj_class)
filter_string = ''
elif (root_obj is not None):
if (parent_obj is not None):
if (mo is not None):
path = 'api/mo/uni/{0}/{1}/{2}.json'.format(root_rn, parent_rn, obj_rn)
filter_string = ''
else:
path = 'api/mo/uni/{0}/{1}.json'.format(root_rn, parent_rn)
filter_string = self_child_includes.replace('&', '?', 1)
elif (mo is not None):
path = 'api/mo/uni/{0}.json'.format(root_rn)
filter_string = '?rsp-subtree-filter={0}{1}'.format(obj_filter, self_child_includes)
else:
path = 'api/mo/uni/{0}.json'.format(root_rn)
filter_string = ('?' + parent_self_child_includes)
elif (parent_obj is not None):
if (mo is not None):
path = 'api/class/{0}.json'.format(parent_class)
filter_string = '?query-target-filter={0}{1}&rsp-subtree-filter={2}'.format(parent_filter, self_child_includes, obj_filter)
else:
path = 'api/class/{0}.json'.format(parent_class)
            filter_string = '?query-target-filter={0}{1}'.format(parent_filter, self_child_includes)
else:
path = 'api/class/{0}.json'.format(obj_class)
filter_string = ('?query-target-filter={0}'.format(obj_filter) + child_includes)
if ((child_includes is not None) and (filter_string == '')):
filter_string = child_includes.replace('&', '?', 1)
return (path, filter_string) | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
5bd7c7213bc5d8d57013a4f313bf573a5ed1efca | 3d31d1ebeac4586a455c08d551e81e5596f4a8c4 | /dev/06_21_2018/UPS_Error.py | 34ccb3e8a9a17add7d24bebf44b7e78b6d4fa47d | [
"Python-2.0"
] | permissive | npwebste/UPS_Controller | 0ff9670abd6f4ff5d4f3a5ec3003e4f4ddfcf148 | a90ce2229108197fd48f956310ae2929e0fa5d9a | refs/heads/master | 2021-05-10T10:00:03.186490 | 2018-08-04T23:37:16 | 2018-08-04T23:37:16 | 118,942,833 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,599 | py | # Universal Power Supply Controller
# USAID Middle East Water Security Initiative
#
# Developed by: Nathan Webster
# Primary Investigator: Nathan Johnson
#
# Version History (mm_dd_yyyy)
# 1.00 03_24_2018_NW
#
######################################################
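# Example (added): report a single fault by passing one of the codes handled
# below, e.g. UPS_Error('Error_VFD_Temp')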
def UPS_Error(ErrorCode):
if (ErrorCode == 'Error_VFD_Freq'):
print('VFD frequency set above maximum, shutting down motor')
elif ErrorCode == 'Error_VFD_Volt':
        print('VFD voltage set above maximum, shutting down motor')
elif ErrorCode == 'Error_VFD_Amps':
print('VFD current set above maximum, shutting down motor')
elif ErrorCode == 'Error_VFD_Power':
print('VFD power set above maximum, shutting down motor')
elif ErrorCode == 'Error_VFD_BusVolt':
print('VFD bus voltage set above maximum, shutting down motor')
elif ErrorCode == 'Error_VFD_Temp':
print('VFD temperature set above maximum, shutting down motor')
elif ErrorCode == 'Error_Solar_Voltage':
print('Solar voltage set above maximum, shutting down converter and motor')
elif ErrorCode == 'Error_DC_Link_Voltage':
print('DC link voltage set above maximum, shutting down converter and motor')
elif ErrorCode == 'Error_Voltage_Measurement':
print('Incorrect voltage measurement input')
elif ErrorCode == 'Error_Transfer_Switch':
print('Incorrect transfer switch input')
    elif ErrorCode == 'Error_Duty_Cycle':
        print('Incorrect duty cycle input')
return | [
"30417327+npwebste@users.noreply.github.com"
] | 30417327+npwebste@users.noreply.github.com |
d320b73bc06a0e30403bc1b844e6b62680e3ca62 | d8b1a010bec02de76f179e0d3df113903d91db71 | /TRP_api/TRP_api/urls.py | 055e259fb94ae21368deb72c31aa206e3420ccdd | [] | no_license | vineetkashyap/api12may | 2e933815972bf24cd7ff5efe6800e5fe70122cb1 | 29c1b6fa9c55c900a2bdd1175e0e74a5113fdbdd | refs/heads/master | 2023-05-01T16:54:09.949319 | 2021-05-16T09:37:15 | 2021-05-16T09:37:15 | 366,686,541 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,606 | py | from django.contrib import admin
from django.urls import path,include
from api import views
from django.conf.urls.static import static
from django.conf import settings
from rest_framework.authtoken.views import obtain_auth_token
from knox import views as knox_views
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
router.register('truckowner',views.TruckOwnerModel_View,basename='truckowner')
router.register('transporter',views.TransporterModel_View,basename='transporter')
router.register('agent',views.Tranage_AgentSerializer_View,basename='agent')
router.register('vehicle',views.VehicleRegistraionModelSerializer_View,basename='vehicle')
router.register('driver',views.DriverRegistrationSerializer_View,basename='driver')
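# Note (added): DefaultRouter also generates list/detail routes for each
# registered viewset, e.g. /truckowner/ and /truckowner/{pk}/ for
# TruckOwnerModel_View, alongside the explicit paths declared below.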
urlpatterns = [
path('admin/', admin.site.urls),
path('',include(router.urls)),
path('auth/',include('rest_framework.urls')),
path('gettoken/',obtain_auth_token),
path('gettruckowner/',views.gettruckowner,name='gettruckowner'),
path('gettransporter/',views.gettransporter,name='gettransporter'),
path('getagent/',views.getagent,name='getagent'),
path('getdriver/',views.getdriver,name='getdriver'),
path('getvehicle/',views.getvehicle,name='getvehicle'),
path('api/register/', views.RegisterAPI.as_view(), name='register'),
path('api/login/', views.LoginAPI.as_view(), name='login'),
path('api/logout/', knox_views.LogoutView.as_view(), name='logout'),
path('api/logoutall/', knox_views.LogoutAllView.as_view(), name='logoutall'),
]+static(settings.MEDIA_URL,document_root = settings.MEDIA_ROOT) | [
"vkvineet66@gmail.com"
] | vkvineet66@gmail.com |
41306b320939acb879a6f70f56bcdaf86f2f1648 | 47eb0bcfee356f35a607e4a31b150662d6b0f0bb | /app/shop/admin.py | d2afdb3bb984e827e90b5209645446251a790204 | [] | no_license | HyungtaeMoon/IAMPORT-ShoppingMall | 955183f2ea2a573737bc236940457733b1ead659 | 2c19290380e1c739df31583a9c55bb9d719c6af8 | refs/heads/master | 2022-12-09T14:15:48.563197 | 2019-03-22T13:02:35 | 2019-03-22T13:02:35 | 154,930,700 | 0 | 0 | null | 2022-07-29T22:51:19 | 2018-10-27T05:47:04 | Python | UTF-8 | Python | false | false | 1,637 | py | from django.contrib import admin
from django.utils.safestring import mark_safe
from .models import Item, Order
@admin.register(Item)
class ItemAdmin(admin.ModelAdmin):
list_display = ['photo_tag', 'name', 'amount']
def photo_tag(self, item):
if item.photo:
            return mark_safe('<img src="{}" style="width: 75px;" />'.format(item.photo.url))
return None
@admin.register(Order)
class OrderAdmin(admin.ModelAdmin):
list_display = ['imp_uid', 'user', 'name', 'amount_html', 'status_html', 'paid_at', 'receipt_link']
actions = ['do_update', 'do_cancel']
def do_update(self, request, queryset):
        'Updates the order information.'
total = queryset.count()
if total > 0:
for order in queryset:
order.update()
            self.message_user(request, 'Updated information for {} order(s).'.format(total))
        else:
            self.message_user(request, 'There are no orders to update.')
    do_update.short_description = 'Refresh the selected orders via Iamport'
def do_cancel(self, request, queryset):
        'Requests payment cancellation for the selected orders.'
queryset = queryset.filter(status='paid')
total = queryset.count()
if total > 0:
for order in queryset:
order.cancel()
            self.message_user(request, 'Cancelled {} order(s).'.format(total))
        else:
            self.message_user(request, 'There are no orders to cancel.')
    do_cancel.short_description = 'Request payment cancellation for the selected orders'
| [
"blessmht@gmail.com"
] | blessmht@gmail.com |
2649375b6ae17e806f70ee2bf3a16d2a3073137b | bc91d344ed2ee3f4f93547ec16350f2713e5f704 | /.history/CRUD/views_20190108015459.py | 2867d966ebaada4cf94ef22ab56fdacbeb9fdf8a | [] | no_license | SabitDeepto/Chitra | 10ecf0c4a7588234f0a50adf038783c9ce8706d0 | 160e5d64c8e4ee56a95bb639386785590160ff07 | refs/heads/master | 2020-04-27T21:55:09.685341 | 2019-03-09T16:14:35 | 2019-03-09T16:14:35 | 174,716,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | from django.shortcuts import render
from CRUD.models import Executive
# Create your views here.
def home(request):
executive = Executive.objects.all()
templates = 'index.html'
context = {
'name': 'deepto',
'ex':executive
}
return render(request, templates, context)
def create_executive(request):
return render(request, 'reg') | [
"deepto69@gmail.com"
] | deepto69@gmail.com |
901c1ff1b89ae7d1354864bb1bf830a4d212d44a | e71b6d14fbdbc57c7234ca45a47329d7d02fc6f7 | /flask_api/venv/lib/python3.7/site-packages/vsts/gallery/v4_1/models/answers.py | edb392c5bc4c0196922c3ebb9c193159982a3ed4 | [] | no_license | u-blavins/secret_sasquatch_society | c36993c738ab29a6a4879bfbeb78a5803f4f2a57 | 0214eadcdfa9b40254e331a6617c50b422212f4c | refs/heads/master | 2020-08-14T00:39:52.948272 | 2020-01-22T13:54:58 | 2020-01-22T13:54:58 | 215,058,646 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,434 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class Answers(Model):
"""Answers.
:param vSMarketplace_extension_name: Gets or sets the vs marketplace extension name
:type vSMarketplace_extension_name: str
    :param vSMarketplace_publisher_name: Gets or sets the vs marketplace publisher name
:type vSMarketplace_publisher_name: str
"""
_attribute_map = {
'vSMarketplace_extension_name': {'key': 'vSMarketplaceExtensionName', 'type': 'str'},
'vSMarketplace_publisher_name': {'key': 'vSMarketplacePublisherName', 'type': 'str'}
}
def __init__(self, vSMarketplace_extension_name=None, vSMarketplace_publisher_name=None):
super(Answers, self).__init__()
self.vSMarketplace_extension_name = vSMarketplace_extension_name
self.vSMarketplace_publisher_name = vSMarketplace_publisher_name
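# Example (added; values are made up): msrest's Model base class provides
# serialize()/deserialize(), so a populated instance round-trips through the
# wire-format keys declared in _attribute_map:
# answers = Answers(vSMarketplace_extension_name='my-extension',
#                   vSMarketplace_publisher_name='my-publisher')
# answers.serialize()
# # -> {'vSMarketplaceExtensionName': 'my-extension',
# #     'vSMarketplacePublisherName': 'my-publisher'}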
| [
"usama.blavins1@gmail.com"
] | usama.blavins1@gmail.com |
904f854544ae4bd75bb61e866feebe0311a679b2 | a5597d74049fcbe1e1e3afca1f4196243f2e7c90 | /glyce/bin/run_bert_glyce_classifier.py | 453bf9727fb540d922fe9933a41983a23093bb7f | [
"Apache-2.0"
] | permissive | YuChen17Heaven/glyce | 72759d8699bbe37ecd2221e90b8ec06a8844fd29 | 62369e3cc37442ed191862b77d87d0c17c8454f8 | refs/heads/master | 2020-06-14T01:52:41.111642 | 2019-06-30T10:52:10 | 2019-06-30T10:52:10 | 194,857,610 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,405 | py | # encoding: utf-8
"""
@author: Yuxian Meng
@contact: yuxian_meng@shannonai.com
@version: 1.0
@file: run_glyph_classifier
@time: 2019/4/8 15:40
Starting from this line, write the description and explanation of this file.
"""
import os
import sys
root_path = "/".join(os.path.realpath(__file__).split("/")[:-3])
print("check the root_path of this repo")
print(root_path)
if root_path not in sys.path:
sys.path.insert(0, root_path)
import torch
from glyce.utils.tokenization import BertTokenizer
from glyce.utils.optimization import BertAdam
from glyce.dataset_readers.bert_config import Config
from glyce.dataset_readers.bert_sent_pair import *
from glyce.models.glyce_bert.glyce_bert_classifier import GlyphBertClassifier
from glyce.dataset_readers.bert_data_utils import convert_examples_to_features
from glyce.utils.metrics.cls_evaluate_funcs import acc_and_f1
logging.basicConfig()
logger = logging.getLogger(__name__)
def args_parser():
# start parser
parser = argparse.ArgumentParser()
# required parameters
parser.add_argument("--config_path", default="/home/lixiaoya/dataset/", type=str)
parser.add_argument("--data_dir", default=None, type=str, help="the input data dir")
parser.add_argument("--bert_model", default=None, type=str, required=True,
help="bert-large-uncased, bert-base-cased, bert-large-cased")
parser.add_argument("--task_name", default=None, type=str)
# parser.add_argument("--output_dir", default=None,
    # type=str, required=True, help="the output directory where the model predictions and checkpoints will")
# # other parameters
parser.add_argument("--cuda", type=bool, default=True)
parser.add_argument("--max_seq_length", default=128,
                        type=int, help="the maximum total input sequence length after WordPiece tokenization")
parser.add_argument("--do_train", action="store_true",
help="Whether to run training")
parser.add_argument("--do_eval", action="store_true",
help="set this flag if you are using an uncased model.")
parser.add_argument("--train_batch_size", default=32, type=int)
parser.add_argument("--dev_batch_size", default=32, type=int)
parser.add_argument("--checkpoint", default=100, type=int)
parser.add_argument("--test_batch_size", default=32, type=int)
parser.add_argument("--learning_rate", default=5e-5, type=float)
parser.add_argument("--num_train_epochs", default=3.0, type=float)
parser.add_argument("--warmup_proportion", default=0.1, type=float)
parser.add_argument("--local_rank", type=int, default=-1)
parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
parser.add_argument("--seed", type=int, default=3306)
parser.add_argument("--nworkers", type=int, default=1)
parser.add_argument("--step", type=int, default=1)
parser.add_argument("--export_model", type=bool, default=True)
parser.add_argument("--output_dir", type=str, default="/data/nfsdata/data/yuxian/train_logs")
parser.add_argument("--data_sign", type=str, default="nlpcc-dbqa")
# classifier_sign == "single_linear
parser.add_argument("--classifier_sign", type=str, default="single_linear")
args = parser.parse_args()
args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed_all(args.seed)
os.makedirs(args.output_dir, exist_ok=True)
return args
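# Example invocation (added for illustration; every path below is a
# placeholder, not a default shipped with the repo):
# python run_bert_glyce_classifier.py \
#     --config_path /path/to/glyce_bert_config.json \
#     --bert_model /path/to/chinese_bert \
#     --task_name dbqa --data_sign nlpcc-dbqa \
#     --data_dir /path/to/nlpcc-dbqa --do_train \
#     --train_batch_size 32 --learning_rate 5e-5 --num_train_epochs 3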
def load_data(config):
# load some data and processor
# data_processor = MsraNerProcessor()
if config.data_sign == "nlpcc-dbqa":
data_processor = DBQAProcessor()
else:
raise ValueError
label_list = data_processor.get_labels()
tokenizer = BertTokenizer.from_pretrained(config.bert_model, do_lower_case=True)
# load data exampels
train_examples = data_processor.get_train_examples(config.data_dir)
dev_examples = data_processor.get_dev_examples(config.data_dir)
test_examples = data_processor.get_test_examples(config.data_dir)
# convert data example into featrues
train_features = convert_examples_to_features(train_examples, label_list, config.max_seq_length, tokenizer,
task_sign=config.task_name)
train_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
train_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
train_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
train_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
train_data = TensorDataset(train_input_ids, train_input_mask, train_segment_ids, train_label_ids)
# train_sampler = DistributedSampler(train_data)
train_sampler = RandomSampler(train_data)
dev_features = convert_examples_to_features(dev_examples, label_list, config.max_seq_length, tokenizer,
task_sign=config.task_name)
dev_input_ids = torch.tensor([f.input_ids for f in dev_features], dtype=torch.long)
dev_input_mask = torch.tensor([f.input_mask for f in dev_features], dtype=torch.long)
dev_segment_ids = torch.tensor([f.segment_ids for f in dev_features], dtype=torch.long)
dev_label_ids = torch.tensor([f.label_id for f in dev_features], dtype=torch.long)
dev_data = TensorDataset(dev_input_ids, dev_input_mask, dev_segment_ids, dev_label_ids)
dev_sampler = RandomSampler(dev_data)
test_features = convert_examples_to_features(test_examples, label_list, config.max_seq_length, tokenizer,
task_sign=config.task_name)
test_input_ids = torch.tensor([f.input_ids for f in test_features], dtype=torch.long)
test_input_mask = torch.tensor([f.input_mask for f in test_features], dtype=torch.long)
test_segment_ids = torch.tensor([f.segment_ids for f in test_features], dtype=torch.long)
test_label_ids = torch.tensor([f.label_id for f in test_features], dtype=torch.long)
test_data = TensorDataset(test_input_ids, test_input_mask, test_segment_ids, test_label_ids)
# test_sampler = DistributedSampler(test_data)
test_sampler = RandomSampler(test_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, \
batch_size=config.train_batch_size, num_workers=config.nworkers)
dev_dataloader = DataLoader(dev_data, sampler=dev_sampler, \
batch_size=config.dev_batch_size, num_workers=config.nworkers)
test_dataloader = DataLoader(test_data, sampler=test_sampler, \
batch_size=config.test_batch_size, num_workers=config.nworkers)
num_train_steps = int(len(train_examples) / config.train_batch_size * 5)
return train_dataloader, dev_dataloader, test_dataloader, num_train_steps, label_list
def load_model(config, num_train_steps, label_list):
# device = torch.device(torch.cuda.is_available())
device = torch.device("cuda")
n_gpu = torch.cuda.device_count()
model = GlyphBertClassifier(config, num_labels=len(label_list))
model.to(device)
if n_gpu > 1:
model = torch.nn.DataParallel(model)
    # prepare optimizer
param_optimizer = list(model.named_parameters())
no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{"params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], "weight_decay": 0.01},
{"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0}]
optimizer = BertAdam(optimizer_grouped_parameters,
lr=config.learning_rate,
warmup=config.warmup_proportion,
t_total=num_train_steps)
return model, optimizer, device, n_gpu
def train(model, optimizer, train_dataloader, dev_dataloader, test_dataloader, config, \
device, n_gpu, label_list):
global_step = 0
nb_tr_steps = 0
tr_loss = 0
dev_best_acc = 0
dev_best_precision = 0
dev_best_recall = 0
dev_best_f1 = 0
dev_best_loss = 10000000000000
test_best_acc = 0
test_best_precision = 0
test_best_recall = 0
test_best_f1 = 0
test_best_loss = 1000000000000000
model.train()
for idx in range(int(config.num_train_epochs)):
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
print("#######" * 10)
print("EPOCH: ", str(idx))
for step, batch in tqdm(enumerate(train_dataloader)):
batch = tuple(t.to(device) for t in batch)
input_ids, input_mask, segment_ids, label_ids = batch
loss, glyph_loss = model(input_ids, segment_ids, input_mask, label_ids)
if n_gpu > 1:
loss = loss.mean()
                glyph_loss = glyph_loss.mean()
if global_step < config.glyph_warmup:
sum_loss = loss + config.glyph_ratio * glyph_loss
else:
sum_loss = loss + config.glyph_ratio * glyph_loss * config.glyph_decay ** (idx + 1)
sum_loss.backward()
tr_loss += loss.item()
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
if (step + 1) % config.gradient_accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
global_step += 1
if (nb_tr_steps+1) % config.checkpoint == 0:
print("-*-" * 15)
print("current training loss is : ")
print("classification loss, glyph loss")
print(loss.item(), glyph_loss.item())
tmp_dev_loss, tmp_dev_acc, tmp_dev_f1 = eval_checkpoint(model,
dev_dataloader,
config, device,
n_gpu, label_list,
eval_sign="dev")
print("......" * 10)
print("DEV: loss, acc, f1")
print(tmp_dev_loss, tmp_dev_acc, tmp_dev_f1)
if tmp_dev_f1 > dev_best_f1 or tmp_dev_acc > dev_best_acc:
dev_best_acc = tmp_dev_acc
dev_best_loss = tmp_dev_loss
# dev_best_precision = tmp_dev_prec
# dev_best_recall = tmp_dev_rec
dev_best_f1 = tmp_dev_f1
tmp_test_loss, tmp_test_acc, tmp_test_f1 = eval_checkpoint(model,
test_dataloader,
config,
device,
n_gpu,
label_list,
eval_sign="test")
print("......" * 10)
print("TEST: loss, acc, f1")
print(tmp_test_loss, tmp_test_acc, tmp_test_f1)
if tmp_test_f1 > test_best_f1 or tmp_test_acc > test_best_acc:
test_best_acc = tmp_test_acc
test_best_loss = tmp_test_loss
# test_best_precision = tmp_test_prec
# test_best_recall = tmp_test_rec
test_best_f1 = tmp_test_f1
# export model
if config.export_model:
model_to_save = model.module if hasattr(model, "module") else model
output_model_file = os.path.join(config.output_dir, "pytorch_model.bin")
torch.save(model_to_save.state_dict(), output_model_file)
print("-*-" * 15)
    # export a trained model
model_to_save = model
output_model_file = os.path.join(config.output_dir, "bert_model.bin")
if config.export_model == "True":
torch.save(model_to_save.state_dict(), output_model_file)
print("=&=" * 15)
print("DEV: current best precision, recall, f1, acc, loss ")
print(dev_best_precision, dev_best_recall, dev_best_f1, dev_best_acc, dev_best_loss)
print("TEST: current best precision, recall, f1, acc, loss ")
print(test_best_precision, test_best_recall, test_best_f1, test_best_acc, test_best_loss)
print("=&=" * 15)
def eval_checkpoint(model_object, eval_dataloader, config, \
device, n_gpu, label_list, eval_sign="dev"):
# input_dataloader type can only be one of dev_dataloader, test_dataloader
model_object.eval()
idx2label = {i: label for i, label in enumerate(label_list)}
eval_loss = 0
eval_accuracy = []
eval_f1 = []
eval_recall = []
eval_precision = []
eval_steps = 0
for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader):
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
label_ids = label_ids.to(device)
with torch.no_grad():
tmp_eval_loss, glyph_loss = model_object(input_ids, segment_ids, input_mask, label_ids)
logits, glyph_loss = model_object(input_ids, segment_ids, input_mask)
logits = logits.detach().cpu().numpy()
# logits = np.argmax(logits, axis=-1)
label_ids = label_ids.to("cpu").numpy()
input_mask = input_mask.to("cpu").numpy()
# reshape_lst = label_ids.shape
# logits = np.reshape(logits, (reshape_lst[0], reshape_lst[1], -1))
logits = np.argmax(logits, axis=-1)
# logits = logits.tolist()
# logits = [[idx2label[tmp] for tmp in logit_item] for logit_item in logits]
# label_ids = label_ids.tolist()
input_mask = input_mask.tolist()
# label_ids = [[idx2label[tmp] for tmp in label_item] for label_item in label_ids]
# print("check the format and content of labels and logtis")
# print(logits)
# print(label_ids)
# exit()
# tmp_accuracy = cal_accuracy(logits, label_ids, label_list)
# eval_accuracy.append(tmp_accuracy)
eval_loss += tmp_eval_loss.mean().item()
# tmp_precision, tmp_recall, tmp_f1 = cal_ner_f1(logits, label_ids, label_list)
metric = acc_and_f1(preds=logits, labels=label_ids)
# print("check the labels and output")
# print(logits[0])
# print(label_ids[0])
eval_accuracy.append(metric['acc'])
# eval_precision.append(tmp_precision)
# eval_recall.append(tmp_recall)
eval_f1.append(metric['f1'])
eval_steps += 1
average_loss = round(eval_loss / eval_steps, 4)
eval_f1 = round(sum(eval_f1) / (len(eval_f1)), 4)
# eval_precision = round(sum(eval_precision) / len(eval_precision), 4)
# eval_recall = round(sum(eval_recall) / len(eval_recall), 4)
eval_accuracy = round(sum(eval_accuracy) / len(eval_accuracy), 4)
return average_loss, eval_accuracy, eval_f1 # eval_precision, eval_recall, eval_f1
def merge_config(args_config):
model_config_path = args_config.config_path
model_config = Config.from_json_file(model_config_path)
model_config.update_args(args_config)
# print(model_config.to_dict())
model_config.print_config()
return model_config
def main():
args_config = args_parser()
config = merge_config(args_config)
train_loader, dev_loader, test_loader, num_train_steps, label_list = load_data(config)
model, optimizer, device, n_gpu = load_model(config, num_train_steps, label_list)
train(model, optimizer, train_loader, dev_loader, test_loader, config, device, n_gpu, label_list)
# train(model, optimizer, train_dataloader, dev_dataloader, test_dataloader, config, \
# device, n_gpu)
if __name__ == "__main__":
main()
| [
"xiaoyli@outlook.com"
] | xiaoyli@outlook.com |
823d055c9fbcc5df091127861acf22ab33ef3444 | 884249dc53b3e1a4461f44cc07f3c11b798a8bee | /tests/null/test_dropna.py | 9963ab1313220b26c0d86461aafd76144fe03e77 | [
"MIT"
] | permissive | MacHu-GWU/learn_pandas-project | b82ca64061c0afd2e470e0a7d17a8997981fd219 | 86d51d11d6f0a50ffcffbf743da197f4a7b12d61 | refs/heads/master | 2021-01-15T11:29:59.682659 | 2018-02-02T16:55:41 | 2018-02-02T16:55:41 | 99,621,140 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 778 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
import pandas as pd
from learn_pandas import assert_value_equal
def test_dropna():
data = [[1, 2, 3],
[None, 5, 6],
[7, 8, 9]]
df = pd.DataFrame(
data,
index=[1, 2, 3],
columns=list("ABC"),
)
res = df.dropna(axis=0) # by row
assert_value_equal(res, [[1, 2, 3], [7, 8, 9]])
"""
A B C
1 1 2 3
3 7 8 9
"""
res = df.dropna(axis=1) # by column
assert_value_equal(res, [[2, 3], [5, 6], [8, 9]])
"""
B C
1 2 3
2 5 6
3 8 9
"""
if __name__ == "__main__":
import os
basename = os.path.basename(__file__)
pytest.main([basename, "-s", "--tb=native"])
| [
"MacHu-GWU@users.noreply.github.com"
] | MacHu-GWU@users.noreply.github.com |
4366e63b4ea98f08dfbfb6348ad3e918f1f35b63 | 080c13cd91a073457bd9eddc2a3d13fc2e0e56ae | /MY_REPOS/awesome-4-new-developers/tensorflow-master/tensorflow/python/keras/distribute/distributed_training_utils.py | d77ef72ddd8e104d640d119c9d5f06aca8845479 | [
"Apache-2.0"
] | permissive | Portfolio-Projects42/UsefulResourceRepo2.0 | 1dccc8961a09347f124d3ed7c27c6d73b9806189 | 75b1e23c757845b5f1894ebe53551a1cf759c6a3 | refs/heads/master | 2023-08-04T12:23:48.862451 | 2021-09-15T12:51:35 | 2021-09-15T12:51:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,556 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities related to distributed training."""
# pylint:disable=protected-access
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.distribute import values as values_lib
from tensorflow.python.keras import backend
from tensorflow.python.ops import variables
# TODO(b/118776054): Currently we support global batch size for TPUStrategy and
# core MirroredStrategy only. Remove this check when contrib MirroredStrategy is
# no longer needed.
def global_batch_size_supported(distribution_strategy):
return (
distribution_strategy.extended._global_batch_size
) # pylint: disable=protected-access
def call_replica_local_fn(fn, *args, **kwargs):
"""Call a function that uses replica-local variables.
This function correctly handles calling `fn` in a cross-replica
context.
Args:
fn: The function to call.
*args: Positional arguments to the `fn`.
**kwargs: Keyword argument to `fn`.
Returns:
The result of calling `fn`.
"""
# TODO(b/132666209): Remove this function when we support assign_*
# for replica-local variables.
strategy = None
if "strategy" in kwargs:
strategy = kwargs.pop("strategy")
else:
if ds_context.has_strategy():
strategy = ds_context.get_strategy()
# TODO(b/120571621): TPUStrategy does not implement replica-local variables.
is_tpu = backend.is_tpu_strategy(strategy)
if (not is_tpu) and strategy and ds_context.in_cross_replica_context():
with strategy.scope():
return strategy.extended.call_for_each_replica(fn, args, kwargs)
return fn(*args, **kwargs)
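# Illustrative usage (added; `metric`, `y_true`, `y_pred` and `strategy` are
# hypothetical objects, not defined in this module): updating a Keras metric
# that owns replica-local variables from a cross-replica context.
# call_replica_local_fn(metric.update_state, y_true, y_pred, strategy=strategy)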
def is_distributed_variable(v):
"""Returns whether `v` is a distributed variable."""
return isinstance(v, values_lib.DistributedValues) and isinstance(
v, variables.Variable
)
| [
"bryan.guner@gmail.com"
] | bryan.guner@gmail.com |
6bd5a404f883d52e21e57ccb4185601a09c9c35c | ac4b9385b7ad2063ea51237fbd8d1b74baffd016 | /.history/utils/ocr/handle_image_20210209180144.py | 622b08919b45fce1289c8a8afa9b1bf7344a199b | [] | no_license | preethanpa/ssoemprep | 76297ef21b1d4893f1ac2f307f60ec72fc3e7c6f | ce37127845253c768d01aeae85e5d0d1ade64516 | refs/heads/main | 2023-03-09T00:15:55.130818 | 2021-02-20T06:54:58 | 2021-02-20T06:54:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,887 | py | import os
import cv2
import re
import numpy as np
from PIL import Image
import pytesseract
from pytesseract import Output
from fpdf import FPDF
import matplotlib.pyplot as plt
'''
IMAGE HANDLING METHODS
'''
# get grayscale image
def get_grayscale(image):
return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# blur removal
def remove_blur(image):
return cv2.medianBlur(image,5)
# noise removal
def remove_noise(image):
return cv2.fastNlMeansDenoisingColored(image, None, 10, 10, 7, 15)
#thresholding
def thresholding(image):
return cv2.threshold(image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
#dilation
def dilate(image):
kernel = np.ones((5,5),np.uint8)
return cv2.dilate(image, kernel, iterations = 1)
#erosion
def erode(image):
kernel = np.ones((5,5),np.uint8)
return cv2.erode(image, kernel, iterations = 1)
def extract_pdf_from_image(fileName='', pdf_path='', action='', psm=3):
'''
Extract text from image and save as PDF.
fileName=''
pdf_path='',
action='',
psm=3
'''
print(f'FileName is {fileName}')
#custom_config = r'-c tessedit_char_whitelist=123456789MALEPQRETHANabcdefghijklmnopqrstuvwxyz --psm 6'
#custom_config = r'-l eng --psm 11'
custom_config = r'-l eng --psm ' + str(psm)
pdfdir = pdf_path
if not os.path.exists(pdfdir):
os.makedirs(pdfdir)
# pdfFileName = os.path.basename(fileName).split('.')[0] + '.pdf'
pdfFileName = os.path.basename(fileName).split('.')[0]+ '.pdf'
pdfFilePath = pdfdir + '/' + pdfFileName
print(f'PDF File Path {pdfFilePath}')
#d = pytesseract.image_to_data(img, output_type=Output.DICT)
img = cv2.imread(fileName)
img1 = None
if (action == 1):
img1 = remove_noise(img)
if (action == 2):
img1 = get_grayscale(img)
#img1 = erode(img)
if (action == 3):
        img1 = remove_blur(img)
    if img1 is None:
        # No recognised preprocessing action: fall back to the raw image
        img1 = img
#text = pytesseract.image_to_string(img1, config=custom_config,lang='eng')
text = pytesseract.image_to_pdf_or_hocr(img1, extension='pdf')
with open(pdfFilePath, mode = 'w+b') as f:
f.write(text)
return pdfFilePath
def convert_text_to_pdf(text='', pdf_path='', filename=''):
'''
Convert text file to PDF
text=''
pdf_path=''
filename=''
'''
tempdir = "/tmp"
pdfdir = pdf_path
textFileName = tempdir + '/' + filename + ".txt"
pdfFileName = pdfdir + '/' + filename + ".pdf"
if not os.path.exists(tempdir):
os.makedirs(tempdir)
if not os.path.exists(pdfdir):
os.makedirs(pdfdir)
# save FPDF() class into a
# variable pdf
pdf = FPDF()
# Add a page
pdf.add_page()
# set style and size of font
# that you want in the pdf
pdf.set_font("Arial", size = 15)
    with open(textFileName, mode='w') as f:
f.write(text)
line = 1
f = open(textFileName, "r")
for x in f:
x1 = re.sub(u"(\u2014|\u2018|\u2019|\u201c|\u201d)", "", x)
pdf.cell(100, 10, txt=x1, ln=line, align='L')
line=line+1
#save the pdf with name .pdf
pdf.output(pdfFileName,'F')
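# Minimal usage sketch (added; the paths below are placeholders):
# pdf_file = extract_pdf_from_image(fileName='/tmp/scan.jpg',
#                                   pdf_path='/tmp/pdfs', action=2, psm=6)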
def mark_region(image_path):
print(f'image_path {image_path}')
image = None
im = None
    try:
        im = cv2.imread(image_path, 1)
    except Exception as exc:
        print(f'Exception in mark_region: {exc}')
        return image, []
gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (9,9), 0)
thresh = cv2.adaptiveThreshold(blur,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV,11,30)
# Dilate to combine adjacent text contours
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (9,9))
dilate = cv2.dilate(thresh, kernel, iterations=4)
# Find contours, highlight text areas, and extract ROIs
cnts = cv2.findContours(dilate, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
line_items_coordinates = []
for c in cnts:
area = cv2.contourArea(c)
x,y,w,h = cv2.boundingRect(c)
print(f'y {y} and x {x}; area is {area}')
if y >= 600 and x <= 1000:
if area > 1000:
image = cv2.rectangle(im, (x,y), (2200, y+h), color=(255,0,255), thickness=3)
line_items_coordinates.append([(x,y), (2200, y+h)])
if y >= 2400 and x<= 2000:
image = cv2.rectangle(im, (x,y), (2200, y+h), color=(255,0,255), thickness=3)
line_items_coordinates.append([(x,y), (2200, y+h)])
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# try:
# get co-ordinates to crop the image
# c = line_items_coordinates[1]
c = line_items_coordinates[0]
print(f'line_items_coordnates {c}')
# cropping image img = image[y0:y1, x0:x1]
img = image[c[0][1]:c[1][1], c[0][0]:c[1][0]]
print(f'img is {img}')
plt.figure(figsize=(10,10))
plt.imshow(img)
# convert the image to black and white for better OCR
ret,thresh1 = cv2.threshold(img, 120, 255, cv2.THRESH_BINARY)
# pytesseract image to string to get results
text = str(pytesseract.image_to_string(thresh1, config='--psm 6'))
print(f'text is {text}')
    convert_text_to_pdf(text, '', os.path.basename(image_path).split('.')[0])
return (image, line_items_coordinates) | [
"{abhi@third-ray.com}"
] | {abhi@third-ray.com} |
3ae0a854793798036d9b002c8839bb944baf9a81 | 466912406272829982f75854cf0104c6ce8c9814 | /data/spider2/migrate/clean_beian_bywhois.py | 3638dc4f8cbceed4d78ca34670fd75b8d298adef | [] | no_license | logonmy/Codes | 9631fa103fc499663361fa7eeccd7cedb9bb08e4 | 92723efdeccfc193f9ee5d0ab77203c254f34bc2 | refs/heads/master | 2021-09-21T18:07:22.985184 | 2018-08-30T05:53:26 | 2018-08-30T05:53:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,125 | py | # -*- coding: utf-8 -*-
import os, sys
from pymongo import MongoClient
import pymongo
import time
import gevent
from gevent.event import Event
from gevent import monkey; monkey.patch_all()
reload(sys)
sys.setdefaultencoding("utf-8")
sys.path.append(os.path.join(os.path.split(os.path.realpath(__file__))[0], '../../util'))
import loghelper, config
import db
import util
#logger
loghelper.init_logger("domain_2_beian", stream=True)
logger = loghelper.get_logger("domain_2_beian")
#mongo
mongo = db.connect_mongo()
collection = mongo.info.beian
BEIANS =[]
def whoisCheck():
while True:
if len(BEIANS) == 0:
return
beian = BEIANS.pop(0)
#logger.info(beian["domain"])
domain = str(beian["domain"])
creation_date = util.whois_creation_date(domain)
if creation_date is not None:
logger.info("%s : %s -> %s", beian["domain"], beian["beianDate"], creation_date)
if beian["beianDate"] > creation_date:
collection.update({"domain": beian["domain"]}, {"$set": {"whoisChecked": True, "whoisExpire":"N"}},multi=True)
else:
logger.info("Expire %s", domain)
collection.update({"domain": beian["domain"]}, {"$set": {"whoisChecked": True, "whoisExpire":"Y"}},multi=True)
else:
logger.info("%s has no whois data",domain)
collection.update({"domain": beian["domain"]}, {"$set": {"whoisChecked": True, "whoisExpire":"NA"}},multi=True)
if __name__ == "__main__":
concurrent_num = 30
while True:
logger.info("beian check by whois start...")
# run(appmkt, WandoujiaCrawler(), "com.ctd.m3gd")
beians = list(collection.find({"whoisChecked": {"$ne": True}}, limit=10000))
for beian in beians:
BEIANS.append(beian)
#logger.info(BEIANS)
if len(beians) > 0:
threads = [gevent.spawn(whoisCheck) for i in xrange(concurrent_num)]
gevent.joinall(threads)
else:
#break
logger.info("beian check by whois end.")
time.sleep(30 * 60) | [
"hush_guo@163.com"
] | hush_guo@163.com |
0d39a0a246d72d162c9cd1e2203c2e8a6e26f448 | cfeb96d00a07cf3a5743a44fcef546aceeae3d3a | /309-BestTimetoBuyandSellStockwithCooldown.py | bc02e67e339e2e08644f56958e4aba5d681b2afe | [] | no_license | minseoch/algorithm | d5218b6187129e68aa355ce6cc89a3496e0f654c | 4c0cfe857f5d78a44c1a3bfb2571d72da4911d97 | refs/heads/master | 2023-03-04T02:10:02.344708 | 2021-01-30T04:44:28 | 2021-01-30T04:44:28 | 296,403,602 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,338 | py | # 309. Best Time to Buy and Sell Stock with Cooldown
# buy-sell-cooldown
# You must sell the stock before you buy again
# After you sell your stock, you cannot buy stock on next day
class Solution:
# time O(n^2), space O(n)
def maxProfit(self, prices):
length = len(prices)
maxP = [0] * (length+2)
for i in range(length-1, -1, -1): # ith day- buying or not
# in case buying, find selling day to maximize profit
curMaxProfit = 0
for sellDay in range(i+1, length):
curProfit = prices[sellDay] - prices[i] + maxP[sellDay+2] #selling profit on 'sellDay' + profit on 2 days after
curMaxProfit = max(curMaxProfit, curProfit)
# in case do nothing, choose which is better
maxP[i] = max(maxP[i+1], curMaxProfit)
return maxP[0]
# https://leetcode.com/problems/best-time-to-buy-and-sell-stock-with-cooldown/discuss/293789/Python-O(N)-DP-I-believe-my-state-is-easier-to-understand
    # Use DP to solve this problem. It is natural to come up with DP, because this is a "stage-decision" problem: every day we decide to buy/sell/rest.
#
# The state is defined as below
#
    # dp(n, 0) -> The max profit we get on day n, if we rest on day n.
    # dp(n, 1) -> The max profit we get on day n, if we buy on day n.
    # dp(n, 2) -> The max profit we get on day n, if we sell on day n.
#
# Below is the state transition function
#
# dp(n, 0) = max{ dp(n-1, 1), dp(n-1, 0), dp(n-1, 2) },
# if we rest on day n, we do not really care about what we have done on day n-1,
# you can do whatever you want, and we just take the max profit from day n-1
# dp(n, 1) = dp[n-1][0] - prices[n],
    #     if we buy on day n, we cannot buy on day n-1, because double-buying is naturally disallowed
# in the "Stock" Series. We cannot sell on day n-1, because of the new cool-down policy.
# So in day n-1, we can only rest.
# dp(n, 2) = max {dp(0, 1), dp(1, 1), ...., dp(n-1, 1)} + prices[n],
    #     if we sell on day n, we need to make sure we bought the stock earlier, on one of days (0...n-1).
    #     For example, if you rest on the first 2 days, there is NOTHING for you to sell on the 3rd day.
    #     Among all the possible "buy-days", we pick the one with max profit.
# Now, you might think: hmmmm, this is an O(N^2) DP because of 3.,
# we need to get max from a list of values in each iteration.
# Not really, you can keep track of the max of the past dp(n, 1).
# In the following solution, I use the var bought to keep track.
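    # Worked example (added): prices = [1, 2, 3, 0, 2] yields max profit 3:
    # buy day 0 (1), sell day 1 (2) -> +1; day 2 is the forced cooldown;
    # buy day 3 (0), sell day 4 (2) -> +2.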
def maxProfit2(self, prices):
if not prices:
return 0
dp = [[0 for _ in range(3)] for _ in range(len(prices))]
dp[0][0] = 0
dp[0][1] = -prices[0]
dp[0][2] = float('-inf')
bought = dp[0][1]
n = len(prices)
for i in range(1, n):
dp[i][0] = max([dp[i - 1][0], dp[i - 1][2], dp[i - 1][1]])
dp[i][1] = dp[i - 1][0] - prices[i]
dp[i][2] = bought + prices[i]
bought = max(bought, dp[i][1])
print(f"bought={bought}")
print(dp)
return max(dp[n - 1])
obj = Solution()
prices = [10,20,30,0,20]
prices = [30,40,50,10,20,70]
print(obj.maxProfit(prices))
# good to read
# https://medium.com/algorithms-and-leetcode/best-time-to-buy-sell-stocks-on-leetcode-the-ultimate-guide-ce420259b323 | [
"minseo@Minseos-iMac.local"
] | minseo@Minseos-iMac.local |
9be27768994baa40fb29747a6e6ef408b6c00d8c | b69eefcef9398cff6b00f4531736d6f391743481 | /project/settings.py | 0a9d67dad4fbcd510882739617577bbbf7afdec7 | [] | no_license | cjdd3b/j4462-project-template | c47981500c11573acdd5bf240f25aca95184d5de | 35720f9c5ea2d5dc147811af80c113f058c03496 | refs/heads/master | 2016-09-05T17:42:44.186628 | 2013-04-06T18:26:25 | 2013-04-06T18:26:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,384 | py | import os
import django
# CHASE NOTE: These are a few settings that I put into EVERY SINGLE DJANGO PROJECT that I build. You don't
# need to know how they work; the only thing you need to know is that they will make your life INFINITELY
# EASIER. In fact, I would recommend copying them into your new projects when you start work on them after
# the break (don't forget the two imports above).
PROJECT_NAME = 'project'
DJANGO_ROOT = os.path.dirname(os.path.realpath(django.__file__))
SITE_ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
# These two settings (normally found further down in the document) can cause very obnoxious problems depending
# on how your computer is set up. Setting them up like this (using the helpful settings tools above) avoids those
# problems and allows things to just magically work. If you copy these into your new project settings files, be
# sure to DELETE THE ONES DJANGO PLACES IN THE SETTINGS FILE BY DEFAULT. THERE SHOULD ONLY BE ONE STATIC_ROOT AND
# TEMPLATE_DIRS SETTING IN YOUR SETTINGS FILE -- AND IT SHOULD BE THESE (rant over).
STATIC_ROOT = os.path.join(SITE_ROOT, '%s/static' % PROJECT_NAME)
TEMPLATE_DIRS = (
    os.path.join(SITE_ROOT, '%s/templates' % PROJECT_NAME),
)
########## NORMAL DJANGO SETTINGS BEGIN HERE ###########
# Django settings for project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
# CHASE NOTE: Remember, we care about the DATABASES setting. For development purposes, just set the engine
# to sqlite3 as below and give your database a name. That's all you need to do!
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'project.sqlite', # Or path to database file if using sqlite3.
}
}
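# Example (added for illustration, not part of the original template): a
# PostgreSQL setup swaps the engine and adds credentials; every value below
# is a placeholder.
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.postgresql_psycopg2',
#         'NAME': 'mydb',
#         'USER': 'dbuser',
#         'PASSWORD': 'secret',
#         'HOST': 'localhost',
#         'PORT': '5432',
#     }
# }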
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'w$$apzca&q!2&*ewhd6&n)74c#4=^=ccc2jq0h1ybs==phy*qe'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = '%s.urls' % PROJECT_NAME
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = '%s.wsgi.application' % PROJECT_NAME
# CHASE NOTE: Remember, we also care about the INSTALLED_APPS setting. Two important things to
# keep in mind here: One, uncomment the lines as noted to enable the admin (if you want it). And two:
# ANY APP YOU CREATE AND WANT TO USE MUST BE ADDED TO THIS LIST.
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
# Project-specific apps go here
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| [
"chase.davis@gmail.com"
] | chase.davis@gmail.com |
75a5a25eea6bd1dcd30b17c4004f07a9d932559a | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2218/60751/271705.py | b7a6a56ab100877eae272baec1913325326980fa | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | num=input().split(",")
num_=[]
for i in num:
num_.append(int(i))
num_.sort()
print(max(num_[0]*num_[1]*num_[-1],num_[-1]*num_[-2]*num_[-3])) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
6e73aba4d4c121fa20b2d8c1cb4e084034e71658 | 047f45abbdb6e38e36c2c9c920d1e9a7a5702040 | /src/simulations/id_mapper_client.py | 8fda3b72f8359de4989b74db633f47081cacd9d0 | [
"Apache-2.0"
] | permissive | DD-DeCaF/simulations | 4076da1bfd887869ca8c950f4352d632bbfc4fc3 | dab77166f301c0a12e6fed973147fb4add8a62c4 | refs/heads/devel | 2023-01-23T11:32:11.031122 | 2020-05-29T16:46:08 | 2020-05-29T16:46:08 | 73,704,410 | 0 | 2 | Apache-2.0 | 2020-12-08T14:29:46 | 2016-11-14T12:53:53 | Python | UTF-8 | Python | false | false | 1,723 | py | # Copyright 2018 Novo Nordisk Foundation Center for Biosustainability, DTU.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import requests
from simulations.app import app
from simulations.metrics import API_REQUESTS
from simulations.utils import log_time
logger = logging.getLogger(__name__)
def query_identifiers(object_ids, db_from, db_to):
"""
Call the id mapper service.
:param object_ids: list of identifiers to query
:param db_from: the source of the identifier, e.g. 'kegg'
:param db_to: the destination type of the identifier, e.g. 'bigg'
"""
if len(object_ids) == 0:
return {}
query = json.dumps(
{"ids": object_ids, "dbFrom": db_from, "dbTo": db_to, "type": "Metabolite"}
)
logger.info(
"query id mapper at %s with %s", app.config["ID_MAPPER_API"], str(query)
)
with log_time(operation=f"ID map request for ids: {object_ids}"):
with API_REQUESTS.labels(
"model", os.environ["ENVIRONMENT"], "id-mapper", app.config["ID_MAPPER_API"]
).time():
return requests.post(
f"{app.config['ID_MAPPER_API']}/query", data=query
).json()["ids"]
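# Illustrative call (added; the identifiers are examples only):
# query_identifiers(["C00031", "C00022"], "kegg", "bigg")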
| [
"ali@kvikshaug.no"
] | ali@kvikshaug.no |
90487f0a3d2431a0d2349c1cf70844a83efb30db | 683a90831bb591526c6786e5f8c4a2b34852cf99 | /HackerRank/Python/Strings/2_String_split_and_join.py | 6cfd65612058b1056b5f246af8f8a8e6cf35edc7 | [] | no_license | dbetm/cp-history | 32a3ee0b19236a759ce0a6b9ba1b72ceb56b194d | 0ceeba631525c4776c21d547e5ab101f10c4fe70 | refs/heads/main | 2023-04-29T19:36:31.180763 | 2023-04-15T18:03:19 | 2023-04-15T18:03:19 | 164,786,056 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | # !/usr/bin/env python3
# https://www.hackerrank.com/challenges/python-string-split-and-join/problem
# tags: strings
def split_and_join(str):
lista = str.split(" ")
str = "-".join(lista)
return str
if __name__ == '__main__':
line = input()
result = split_and_join(line)
print(result)
| [
"davbetm@gmail.com"
] | davbetm@gmail.com |
4840c3f91c339b220ffb6aad5c14d810d3ee6ff7 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/galex_j22163-2157/sdB_galex_j22163-2157_coadd.py | e9b5611ae5b1a826b06a0b20d0720c3ed879a03f | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | from gPhoton.gMap import gMap
def main():
gMap(band="NUV", skypos=[334.098125,-21.957308], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_galex_j22163-2157/sdB_galex_j22163-2157_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_galex_j22163-2157/sdB_galex_j22163-2157_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
4ac780776fc43fe74e693ab1ef80be166f0114c0 | 690d99023e33928fbf40158822c59075533eaf44 | /pyapprox/tests/test_low_rank_multi_fidelilty.py | a6ced55fcba99473ea4b988b5d586a9b61e0b2ed | [
"MIT"
] | permissive | allevin/pyapprox | 321a16d1831a6f9abdc9ece632556c29425eb5aa | 2351b1818f3b72554bcb6cc72e994c283c9eb752 | refs/heads/master | 2022-11-20T14:45:02.519387 | 2020-07-27T03:23:52 | 2020-07-27T03:23:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,457 | py | import unittest
import numpy as np
from pyapprox.low_rank_multifidelity import *
from functools import partial
from scipy.stats import uniform
import pyapprox as pya
from scipy.special import jv as bessel_function
class OscillatoryPolyLowFidelityModel(object):
def __init__( self, mesh_dof=100, num_terms=35 ):
self.mesh = np.linspace( -1.,1., mesh_dof )
self.num_terms = num_terms
variable = [uniform(-1,2)]
var_trans = pya.AffineRandomVariableTransformation(variable)
self.poly = pya.PolynomialChaosExpansion()
poly_opts = pya.define_poly_options_from_variable_transformation(
var_trans)
self.poly.configure(poly_opts)
self.poly.set_indices(pya.compute_hyperbolic_indices(
1,self.num_terms-1))
def basis_matrix(self):
# compute vandermonde matrix, i.e. all legendre polynomials up
# at most degree self.num_terms
basis_matrix = self.poly.basis_matrix(
self.mesh.reshape(1,self.mesh.shape[0]))
return basis_matrix
def compute_abs_z(self,z):
abs_z = np.absolute(z)
return abs_z
def __call__(self,samples):
z = samples[0,:]
# z in [0,10*pi]
basis_matrix = self.basis_matrix()
coeffs = np.zeros((self.num_terms,samples.shape[1]) ,float )
abs_z = self.compute_abs_z(z)
for k in range( self.num_terms ):
ck = np.exp( np.sign( z )*1j )*1j**k
ck = ck.real
gk = ck * np.sqrt( np.pi*(2.*k+1.)/ abs_z )*\
bessel_function( k+.5, abs_z )
# gk not defined for z=0
coeffs[k,:] = gk
# must divide by sqrt(2), due to using orthonormal basis with
# respect to w=1/2, but needing orthonormal basis with respect
# to w=1
coeffs[k,:] /= np.sqrt(2)
result = np.dot(basis_matrix, coeffs).T
return result
def generate_samples(self,num_samples):
num_vars = 1
return np.random.uniform(0,10.*np.pi,(num_vars,num_samples))
class OscillatoryHighFidelityModel(OscillatoryPolyLowFidelityModel):
def __init__(self,mesh_dof=100,num_terms=35,eps=1e-3):
super().__init__(mesh_dof,num_terms)
self.eps = eps
def compute_abs_z(self,z):
abs_z = np.absolute(z+self.eps*z**2)
return abs_z
class OscillatorySinLowFidelityModel(OscillatoryPolyLowFidelityModel):
def __init__(self,mesh_dof=100,num_terms=35,eps=1e-3):
super().__init__(mesh_dof,num_terms)
self.eps = eps
def basis_matrix(self):
kk = np.arange(self.num_terms)[np.newaxis,:]
basis_matrix = np.sin(np.pi*(kk+1)*self.mesh[:,np.newaxis])
return basis_matrix
class TestLowRankMultiFidelity(unittest.TestCase):
def setUp(self):
np.random.seed(1)
def test_select_nodes(self):
A = numpy.array([[1.,1.,1 ],[1.,2.,5.5],[1.,3.,13.]])
A = np.random.normal(0,1,(3,3))
G = numpy.dot( A.T, A )
pivots, L = select_nodes( A.copy(), A.shape[1] )
numpy_L = numpy.linalg.cholesky( G )
P = numpy.eye(pivots.shape[0])[pivots,:]
assert numpy.allclose( numpy.dot(P,numpy.dot(G,P.T)),
numpy.dot(L,L.T))
assert numpy.allclose(numpy.dot(P.T,numpy.dot(numpy.dot(L,L.T),P)),G)
A = numpy.random.normal( 0.,1., (4, 3) )
G = numpy.dot( A.T, A )
pivots, L = select_nodes( A.copy(), A.shape[1] )
P = numpy.eye(pivots.shape[0])[pivots,:]
numpy_L = numpy.linalg.cholesky( G )
assert numpy.allclose( numpy.dot(P,numpy.dot(G,P.T)),
numpy.dot(L,L.T))
assert numpy.allclose(numpy.dot(P.T,numpy.dot(numpy.dot(L,L.T),P)),G)
def test_select_nodes_cholesky(self):
A = numpy.array([[1.,1.,1 ],[1.,2.,5.5],[1.,3.,13.]])
A = np.random.normal(0,1,(3,3))
G = numpy.dot( A.T, A )
pivots, L = select_nodes_cholesky( A, A.shape[1] )
numpy_L = numpy.linalg.cholesky( G )
P = pya.get_pivot_matrix_from_vector(pivots,G.shape[0])
assert np.allclose(P.dot(G).dot(P.T),L.dot(L.T))
A = numpy.random.normal( 0.,1., (4, 3) )
G = numpy.dot( A.T, A )
pivots, L = select_nodes_cholesky( A, A.shape[1] )
numpy_L = numpy.linalg.cholesky( G )
P = pya.get_pivot_matrix_from_vector(pivots,G.shape[0])
assert np.allclose(P.dot(G).dot(P.T),L.dot(L.T))
def test_select_nodes_update(self):
A = numpy.random.normal( 0.,1., (5, 4) )
G = numpy.dot( A.T, A )
pivots, L = select_nodes( A.copy(), A.shape[1], order=[1,3,0] )
assert numpy.allclose(pivots,[1,3,0,2])
P = numpy.eye(pivots.shape[0])[pivots,:]
numpy_L = numpy.linalg.cholesky( G )
assert numpy.allclose( numpy.dot(P,numpy.dot(G,P.T)),
numpy.dot(L,L.T))
assert numpy.allclose(numpy.dot(P.T,numpy.dot(numpy.dot(L,L.T),P)),G)
def test_oscillatory_model(self):
eps = 1.e-3
mesh_dof=100
K=35
lf_model1 = OscillatoryPolyLowFidelityModel(mesh_dof,K)
lf_model2 = OscillatorySinLowFidelityModel(mesh_dof,K)
hf_model = OscillatoryHighFidelityModel(mesh_dof,100,eps)
lf_model = lf_model2
# for tutorial
# samples = np.array([[5]])
# import matplotlib.pyplot as plt
# fig,axs=plt.subplots(1,2,figsize=(2*8,6))
# hf_model.eps=1e-2
# axs[0].plot(hf_model.mesh,hf_model(samples)[0,:],label='$u_0$')
# hf_model.eps=1e-3
# axs[0].plot(hf_model.mesh,lf_model1(samples)[0,:],label='$u_1$')
# axs[0].plot(hf_model.mesh,lf_model2(samples)[0,:],label='$u_2$')
# axs[0].legend()
# samples = np.linspace(0.01,np.pi*10-0.1,101)[np.newaxis,:]
# hf_model.eps=1e-2
# axs[1].plot(samples[0,:],hf_model(samples)[:,50],label='$u_0$')
# hf_model.eps=1e-3
# axs[1].plot(samples[0,:],lf_model1(samples)[:,50],label='$u_1$')
# axs[1].plot(samples[0,:],lf_model2(samples)[:,50],label='$u_2$')
# plt.show()
# assert False
# number of quantities of interest/outputs
num_QOI = mesh_dof
        # number of random parameters/inputs
        num_dims = 1
        # number of initial candidates/snapshots for low-fidelity model
        num_lf_candidates = int(1e4)
        # number of interpolation nodes/high-fidelity runs
num_hf_runs = 20
num_test_samples = int(1e3)
test_samples = hf_model.generate_samples(num_test_samples)
hf_test_values = hf_model( test_samples )
lf_test_values = lf_model( test_samples )
mf_model = BiFidelityModel(lf_model,hf_model)
mf_model.build(num_hf_runs,hf_model.generate_samples,
num_lf_candidates)
        # regression test; too difficult to construct an exact unit test
mf_test_values = mf_model( test_samples )
error_mf = compute_mean_l2_error(hf_test_values,
mf_test_values)[1]
assert np.allclose(error_mf,3.0401959914364483e-05)
        return
        # for tutorial: the parameter sweep below is kept for reference and never runs
hf_runs = [i*2 for i in range(1,11)]
error_mf = numpy.empty((len(hf_runs)))
error_lf = numpy.empty((len(hf_runs)))
error_nodes = numpy.empty((len(hf_runs)))
condition = numpy.empty((len(hf_runs)))
for j in range(len(hf_runs)):
num_hf_runs = hf_runs[j]
mf_model = BiFidelityModel(lf_model,hf_model)
mf_model.build(num_hf_runs,hf_model.generate_samples,
num_lf_candidates)
mf_test_values = mf_model( test_samples )
error_mf[j] = compute_mean_l2_error(hf_test_values,
mf_test_values)[1]
error_lf[j] = compute_mean_l2_error(hf_test_values,
lf_test_values)[1]
print ("|hf-lf|", error_lf[j])
print ("|hf-mf|", error_mf[j])
plt.semilogy(hf_runs,error_mf,label=f'$K={K}$')
plt.show()
if __name__== "__main__":
low_rank_mf_test_suite = unittest.TestLoader().loadTestsFromTestCase(
TestLowRankMultiFidelity)
unittest.TextTestRunner(verbosity=2).run(low_rank_mf_test_suite)
| [
"29109026+jdjakem@users.noreply.github.com"
] | 29109026+jdjakem@users.noreply.github.com |
a79f048e280ae3dc1c915f26311e81ccf38b9449 | d2dda11e125068512c5c0db0f24b80bc53c94ce3 | /LeetCode/Ex100/Ex110.py | 8d9d965bb2811d6d4ec769cdb289e6704ef5dcb4 | [] | no_license | JasonVann/CrackingCodingInterview | f90163bcd37e08f6a41525f9f95663d5f42dd8e6 | 8f9327a1879949f61b462cc6c82e00e7c27b8b07 | refs/heads/master | 2021-09-02T09:28:34.553704 | 2018-01-01T12:05:12 | 2018-01-01T12:05:12 | 110,519,907 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,844 | py |
class Ex110(object):
def isBalanced(self, root):
"""
:type root: TreeNode
:rtype: bool
"""
        if root is None:
return True
(level, ans) = self.helper(root, 1)
return ans
def helper(self, root, level):
        if root is None:
return (level-1, True)
else:
(l, lb) = self.helper(root.left, level+1)
(r, rb) = self.helper(root.right, level+1)
if lb == False or rb == False:
return (max(l, r), False)
if abs(l - r) > 1:
return (max(l, r), False)
return (max(l,r), True)
'''
def isBalanced(self, root):
"""
:type root: TreeNode
:rtype: bool
"""
if not root:
return True
def depth(node):
if not node: #leaves
return 0
left = depth(node.left) #left child's depth
right = depth(node.right) #right child's depth
if abs(left-right)>1:
raise Exception #stop recursion and report unbalance
return max(left, right)+1
try:
return abs(depth(root.left)-depth(root.right))<=1
except:
return False
'''
'''
public boolean isBalanced(TreeNode root) {
if(root==null){
return true;
}
return height(root)!=-1;
}
public int height(TreeNode node){
if(node==null){
return 0;
}
int lH=height(node.left);
if(lH==-1){
return -1;
}
int rH=height(node.right);
if(rH==-1){
return -1;
}
if(lH-rH<-1 || lH-rH>1){
return -1;
}
return Math.max(lH,rH)+1;
}
'''
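# Editorial addition: a Python sketch of the O(n) sentinel approach from the
# Java comment above (assumes the usual TreeNode with .left/.right attributes).
def is_balanced_sentinel(root):
    def height(node):
        if node is None:
            return 0
        lh = height(node.left)
        if lh == -1:
            return -1
        rh = height(node.right)
        if rh == -1:
            return -1
        if abs(lh - rh) > 1:
            return -1          # -1 marks an already-unbalanced subtree
        return max(lh, rh) + 1
    return height(root) != -1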
| [
"jasonvanet@gmail.com"
] | jasonvanet@gmail.com |
e235cd6eadb51407845cd19a59fb52175c2ed1f0 | 070dc1e2c5643ef9ae80b24f56cf7b6624f65818 | /video_prediction/datasets/__init__.py | e65f64ab71a1ec5a63652990d7761430f7083e2e | [
"MIT"
] | permissive | anestisdotpy/video_prediction | 9091661e9d56460e6d1dab9e1e774a8ff81241bd | e9aecb8171123c1fe673a1f16864e3554c386cc5 | refs/heads/master | 2020-06-23T02:47:24.487425 | 2019-07-23T18:02:32 | 2019-07-23T18:02:32 | 198,482,843 | 0 | 0 | MIT | 2019-07-23T17:59:23 | 2019-07-23T17:59:22 | null | UTF-8 | Python | false | false | 1,099 | py | from .base_dataset import BaseVideoDataset
from .base_dataset import VideoDataset, SequenceExampleVideoDataset, VarLenFeatureVideoDataset
from .google_robot_dataset import GoogleRobotVideoDataset
from .sv2p_dataset import SV2PVideoDataset
from .softmotion_dataset import SoftmotionVideoDataset
from .kth_dataset import KTHVideoDataset
from .ucf101_dataset import UCF101VideoDataset
from .cartgripper_dataset import CartgripperVideoDataset
def get_dataset_class(dataset):
dataset_mappings = {
'google_robot': 'GoogleRobotVideoDataset',
'sv2p': 'SV2PVideoDataset',
'softmotion': 'SoftmotionVideoDataset',
'bair': 'SoftmotionVideoDataset', # alias of softmotion
'kth': 'KTHVideoDataset',
'ucf101': 'UCF101VideoDataset',
'cartgripper': 'CartgripperVideoDataset',
}
dataset_class = dataset_mappings.get(dataset, dataset)
dataset_class = globals().get(dataset_class)
if dataset_class is None or not issubclass(dataset_class, BaseVideoDataset):
raise ValueError('Invalid dataset %s' % dataset)
return dataset_class
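# Editorial usage note: aliases and class names resolve to the same class,
# e.g. get_dataset_class('bair') is get_dataset_class('SoftmotionVideoDataset');
# an unrecognized name raises ValueError.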
| [
"alexleegk@gmail.com"
] | alexleegk@gmail.com |
78221bbf47e616235915cfb1dc10c45637dc777a | bd741fe3909ae9260232b724cbb395823cf68834 | /python/setup.py | ba010859a7003260e868f6d6f749200202ce1755 | [
"MIT"
] | permissive | simondlevy/NengoCPP | a1b235dd37889c0cf8a2a2b3e6f693a27d379174 | ae09d4e96d866dd4a3748867cf8eb0df5d0acc8f | refs/heads/master | 2020-03-21T19:38:06.204688 | 2019-06-15T18:17:00 | 2019-06-15T18:17:00 | 138,961,016 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | #!/usr/bin/env python
'''
Python distutils setup for for NengoCPP example
Copyright 2018 Simon D. Levy
MIT License
'''
from distutils.core import setup
setup (name = 'nengo_pidcontrol',
version = '0.1',
       install_requires = ['PIL'],  # note: plain distutils ignores install_requires; setuptools is needed for it to take effect
description = '',
py_modules = ['nengo_pidcontrol',],
author='Simon D. Levy',
author_email='simon.d.levy@gmail.com',
license='MIT',
platforms='Linux; Windows; OS X'
)
| [
"simon.d.levy@gmail.com"
] | simon.d.levy@gmail.com |
51f2f9f4db7a0a374df9c157a737b8e90be9f0fe | 1f68b6f9f55afaa7cb32df262f4fe0864472da05 | /人工智能/人工智能入门/数据可视化/die_visual.py | 6c8f873b98271fc5d514ee8841c2c8b03fcd0baa | [] | no_license | faker-hong/testOne | 7c4496362cb5495c25c640076102fe0704f8552f | 768edc4a5526c8972fec66c6a71a38c0b24a1451 | refs/heads/master | 2022-12-04T14:47:53.614685 | 2020-10-30T03:17:50 | 2020-10-30T03:17:50 | 196,514,862 | 1 | 0 | null | 2022-11-22T02:43:32 | 2019-07-12T05:35:09 | Python | UTF-8 | Python | false | false | 498 | py | from 数据可视化.die import Die
import pygal
die = Die()
result = []
frequencies = []
for i in range(100):
    re = die.roll()
    result.append(re)
for value in range(1, die.num_size + 1):
    frequency = result.count(value)
    frequencies.append(frequency)
print(frequencies)
# Visualize the results
hist = pygal.Bar()
hist.title = '97'
hist.x_labels = [1, 2, 3, 4, 5, 6]
hist.x_title = "result"
hist.y_title = "frequency"
hist.add('D6', frequencies)
hist.render_to_file('die_visual.svg') | [
"42666723+hongcheng97@users.noreply.github.com"
] | 42666723+hongcheng97@users.noreply.github.com |
91d6b6ddd88603208daece87feef4b07ccad2846 | 1408ac8b2ed54ec7a7a8e3660bbb7389fbade037 | /Auditions/AuditionLevel/AuditionLevel.py | baf76f63f292391175f556c99955d745aa9117b9 | [] | no_license | Decennium/CodeCombat | 72a2df7288c5d5c3cb88e08425cc0e6507f7a1d5 | 068b937ae7cfc58565f2c5e1f50281c50808ccf1 | refs/heads/master | 2021-08-23T19:37:53.160014 | 2017-12-06T07:52:19 | 2017-12-06T07:52:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,618 | py | # http://codecombat.com/play/level/audition-level
summonTypes = ['paladin']
def summonTroops():
type = summonTypes[len(hero.built) % len(summonTypes)]
if hero.gold > hero.costOf(type):
hero.summon(type)
def commandTroops():
for index, friend in enumerate(hero.findFriends()):
if friend.type == 'paladin':
CommandPaladin(friend)
def CommandPaladin(paladin):
if (paladin.canCast("heal")):
if (hero.health < hero.maxHealth * 0.6):
            target = hero  # was the legacy `self` alias; `hero` is used everywhere else in this file
if target:
hero.command(paladin, "cast", "heal", target)
elif (paladin.health < 100):
hero.command(paladin, "shield")
else:
target = hero.findNearestEnemy()
hero.command(paladin, "attack", target)
def moveTo(position, fast=True):
if (hero.isReady("jump") and hero.distanceTo(position) > 10 and fast):
hero.jumpTo(position)
else:
hero.move(position)
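# Ability ladder used by attack() below: close the distance first, then
# prefer bash, then chain-lightning, falling back to a basic attack when
# both are on cooldown.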
def attack(target):
if target:
if (hero.distanceTo(target) > 10):
moveTo(target.pos)
elif (hero.isReady("bash")):
hero.bash(target)
elif (hero.canCast('chain-lightning', target)):
hero.cast('chain-lightning', target)
elif (hero.isReady("attack")):
hero.attack(target)
else:
pass
while True:
flag = hero.findFlag()
summonTroops()
commandTroops()
if flag:
hero.pickUpFlag(flag)
else:
enemy = hero.findNearestEnemy()
if enemy:
attack(enemy)
| [
"vadim-job-hg@yandex.ru"
] | vadim-job-hg@yandex.ru |
eb9beae6dd40e83e2260ddf0a7862e298b36fe3a | a6e4a6f0a73d24a6ba957277899adbd9b84bd594 | /sdk/python/pulumi_azure_native/storagesync/v20170605preview/get_sync_group.py | f3f917cbf35db452cf5c6afd92530a503ff78d3f | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | MisinformedDNA/pulumi-azure-native | 9cbd75306e9c8f92abc25be3f73c113cb93865e9 | de974fd984f7e98649951dbe80b4fc0603d03356 | refs/heads/master | 2023-03-24T22:02:03.842935 | 2021-03-08T21:16:19 | 2021-03-08T21:16:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,972 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetSyncGroupResult',
'AwaitableGetSyncGroupResult',
'get_sync_group',
]
@pulumi.output_type
class GetSyncGroupResult:
"""
Sync Group object.
"""
def __init__(__self__, id=None, name=None, sync_group_status=None, type=None, unique_id=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if sync_group_status and not isinstance(sync_group_status, str):
raise TypeError("Expected argument 'sync_group_status' to be a str")
pulumi.set(__self__, "sync_group_status", sync_group_status)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if unique_id and not isinstance(unique_id, str):
raise TypeError("Expected argument 'unique_id' to be a str")
pulumi.set(__self__, "unique_id", unique_id)
@property
@pulumi.getter
def id(self) -> str:
"""
The id of the resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="syncGroupStatus")
def sync_group_status(self) -> str:
"""
Sync group status
"""
return pulumi.get(self, "sync_group_status")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="uniqueId")
def unique_id(self) -> Optional[str]:
"""
Unique Id
"""
return pulumi.get(self, "unique_id")
class AwaitableGetSyncGroupResult(GetSyncGroupResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSyncGroupResult(
id=self.id,
name=self.name,
sync_group_status=self.sync_group_status,
type=self.type,
unique_id=self.unique_id)
def get_sync_group(resource_group_name: Optional[str] = None,
storage_sync_service_name: Optional[str] = None,
sync_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSyncGroupResult:
"""
Sync Group object.
:param str resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive.
:param str storage_sync_service_name: Name of Storage Sync Service resource.
:param str sync_group_name: Name of Sync Group resource.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['storageSyncServiceName'] = storage_sync_service_name
__args__['syncGroupName'] = sync_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:storagesync/v20170605preview:getSyncGroup', __args__, opts=opts, typ=GetSyncGroupResult).value
return AwaitableGetSyncGroupResult(
id=__ret__.id,
name=__ret__.name,
sync_group_status=__ret__.sync_group_status,
type=__ret__.type,
unique_id=__ret__.unique_id)
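# Editorial usage sketch (the resource names below are hypothetical):
# result = get_sync_group(resource_group_name='example-rg',
#                         storage_sync_service_name='example-sync-service',
#                         sync_group_name='example-group')
# pulumi.export('syncGroupStatus', result.sync_group_status)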
| [
"noreply@github.com"
] | MisinformedDNA.noreply@github.com |
a28abb90239b0ec78dbdbbcb02137ed32a1ca25a | 6371acdb640e62e4e6addac2ba1aa70002a8c1b1 | /Algorithms/pySINDy/env/lib/python3.6/site-packages/ipyparallel/tests/test_mongodb.py | 81b2b171b88ba68c65515ad28880228e24bdef97 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | M-Vause/SEED | 263307152ebac1e4f49cd81dcd5207ecbdf51139 | cda94a02a5ef47a1e9a885d330eef2821301ebed | refs/heads/master | 2022-12-13T20:11:58.893994 | 2020-04-27T16:10:09 | 2020-04-27T16:10:09 | 252,790,026 | 3 | 3 | MIT | 2022-12-08T01:52:05 | 2020-04-03T16:55:10 | Jupyter Notebook | UTF-8 | Python | false | false | 1,395 | py | """Tests for mongodb backend"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import os
from unittest import TestCase
import pytest
from . import test_db
c = None
@pytest.fixture(scope='module')
def mongo_conn(request):
global c
try:
from pymongo import MongoClient
except ImportError:
pytest.skip("Requires mongodb")
conn_kwargs = {}
if 'DB_IP' in os.environ:
conn_kwargs['host'] = os.environ['DB_IP']
if 'DBA_MONGODB_ADMIN_URI' in os.environ:
# On ShiningPanda, we need a username and password to connect. They are
# passed in a mongodb:// URI.
conn_kwargs['host'] = os.environ['DBA_MONGODB_ADMIN_URI']
if 'DB_PORT' in os.environ:
conn_kwargs['port'] = int(os.environ['DB_PORT'])
try:
c = MongoClient(**conn_kwargs)
except Exception:
c = None
if c is not None:
request.addfinalizer(lambda : c.drop_database('iptestdb'))
return c
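# The fixture above is module-scoped: one client is shared by the whole test
# class, and request.addfinalizer drops the iptestdb database once the
# module's tests have finished.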
@pytest.mark.usefixtures('mongo_conn')
class TestMongoBackend(test_db.TaskDBTest, TestCase):
"""MongoDB backend tests"""
def create_db(self):
try:
from ipyparallel.controller.mongodb import MongoDB
return MongoDB(database='iptestdb', _connection=c)
except Exception:
pytest.skip("Couldn't connect to mongodb")
| [
"58262117+M-Vause@users.noreply.github.com"
] | 58262117+M-Vause@users.noreply.github.com |
a4609ee7d40e60103c1c6d4a1f9cb1b4d861a532 | 45ba55b4fbdaf1657fde92beaeba4f173265afcd | /strawberry/fastapi/context.py | 6a23a0e648a9cb7d607eb89ef9083ac1c2b8e39a | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | strawberry-graphql/strawberry | af96afd4edd1788c59e150597a12501fbc7bf444 | 6d86d1c08c1244e00535840d9d87925431bc6a1c | refs/heads/main | 2023-08-30T03:34:12.929874 | 2023-08-24T12:01:09 | 2023-08-24T12:01:09 | 162,690,887 | 3,408 | 529 | MIT | 2023-09-14T21:49:44 | 2018-12-21T08:56:55 | Python | UTF-8 | Python | false | false | 662 | py | from typing import Any, Dict, Optional, Union
from starlette.background import BackgroundTasks
from starlette.requests import Request
from starlette.responses import Response
from starlette.websockets import WebSocket
CustomContext = Union["BaseContext", Dict[str, Any]]
MergedContext = Union[
"BaseContext", Dict[str, Union[Any, BackgroundTasks, Request, Response, WebSocket]]
]
class BaseContext:
connection_params: Optional[Any] = None
def __init__(self) -> None:
self.request: Optional[Union[Request, WebSocket]] = None
self.background_tasks: Optional[BackgroundTasks] = None
self.response: Optional[Response] = None
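# Editorial usage sketch (`user_id` is a hypothetical field): subclass
# BaseContext to carry application state; the FastAPI integration fills in
# request/response/background_tasks per call.
# class MyContext(BaseContext):
#     user_id: Optional[str] = None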
| [
"noreply@github.com"
] | strawberry-graphql.noreply@github.com |
d40d835030a9e00608bc1155ed3b3db4ca2779e6 | 033da72a51c76e5510a06be93229a547a538cf28 | /Data Engineer with Python Track/22. Transactions and Error Handling in SQL Server/Chapter/03. Transactions in SQL Server/10-Doomed transactions.py | 29523ae21c339f0d22cbec5dee2878d9afa29992 | [] | no_license | ikhwan1366/Datacamp | d5dcd40c1bfeb04248977014260936b1fb1d3065 | 7738614eaebec446842d89177ae2bc30ab0f2551 | refs/heads/master | 2023-03-06T13:41:06.522721 | 2021-02-17T22:41:54 | 2021-02-17T22:41:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 949 | py | '''
Doomed transactions
You want to insert the data of two new customers into the customer table. You prepare a script ensuring that if an error occurs, the transaction rolls back and you get the error message. You want to control this using XACT_ABORT in combination with XACT_STATE.
Instructions
100 XP
- Use the appropriate setting of XACT_ABORT.
- Check if there is an open transaction.
- Rollback the transaction.
- Select the error message.
'''
-- Use the appropriate setting
SET XACT_ABORT ON;
BEGIN TRY
BEGIN TRAN;
INSERT INTO customers VALUES('Mark', 'Davis', 'markdavis@mail.com', '555909090');
INSERT INTO customers VALUES('Dylan', 'Smith', 'dylansmith@mail.com', '555888999');
COMMIT TRAN;
END TRY
BEGIN CATCH
-- Check if there is an open transaction
IF XACT_STATE() <> 0
-- Rollback the transaction
ROLLBACK TRAN;
-- Select the message of the error
SELECT ERROR_MESSAGE() AS Error_message;
END CATCH
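-- Editorial note, XACT_STATE() values: 1 = open, committable transaction;
-- -1 = open but doomed (can only be rolled back); 0 = no active transaction.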
| [
"surel.chandrapratama@gmail.com"
] | surel.chandrapratama@gmail.com |
cb33f242a73fc2923fcc835d8c1946a430e07cbc | 2136dd727f15133b2ee000dadcfa44b7e29d3ff4 | /Day5/dengluTest.py | 0572c98a88c15ab45b23d6226e2407d6d0ab4da9 | [] | no_license | zhaolanxiang113/Weekend112 | b6f56627a17d279db19ed6b409333d8c14c6bb67 | 74f3b54b246ff43a773c8b832fd9ab3af38fd509 | refs/heads/master | 2021-08-23T04:13:52.933570 | 2017-12-03T07:11:49 | 2017-12-03T07:11:49 | 112,907,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 998 | py | import unittest
from selenium import webdriver
import time
class DengLuTest(unittest.TestCase):
    # Triple quotes mark a docstring: a special comment that also shows up in generated documentation
    """Test cases for the login module"""
    def setUp(self):
        # Open the browser
        self.driver = webdriver.Chrome()
        self.driver.implicitly_wait(30)
        # After a browser upgrade, maximizing may need to be disabled; the browser version and the driver version must match for it to work
        self.driver.maximize_window()
    def tearDown(self):
        time.sleep(5)
        self.driver.quit()
    def test_denglu(self):
        """Happy-path login test case"""
        driver = self.driver
        driver.get("http://localhost/index.php?m=user&c=public&a=login")
        driver.find_element_by_id("username").send_keys("testing")
        driver.find_element_by_id("password").send_keys("testing123")
        driver.find_element_by_class_name("login_btn").click()
        print("Current username: testing")
| [
"51Testing"
] | 51Testing |
e5be76fbbcdcd5660d468cf6ecba45afdcd662ab | 750d8ade6abc2b3bd6a24e660a4992114db6ac0c | /lib/music/gui/window.py | de2e80e9ea32e5895ffd45bdfb2ec12e9216241b | [] | no_license | dskrypa/music_manager | 8a00a4bd7b32a87dab2441614c94346fa87c4f13 | ad7265fbd203962a4bf9cf6444c8e10d561a307c | refs/heads/main | 2023-08-09T06:26:46.592118 | 2023-08-08T11:38:08 | 2023-08-08T11:38:08 | 234,730,172 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,065 | py | """
Extended Window class from PySimpleGUI
:author: Doug Skrypa
"""
import logging
import signal
from typing import Callable
from weakref import WeakSet
from PySimpleGUI import Window as _Window
from .utils import FinishInitMixin
__all__ = ['Window']
log = logging.getLogger(__name__)
class Window(_Window):
__registered_sigint_handler = None
__instances = WeakSet()
def __init__(self, *args, finalize_callback: Callable = None, **kwargs):
if self.__registered_sigint_handler is None:
self.register_sigint_handler()
self._finalize_callback = finalize_callback
super().__init__(*args, **kwargs)
self.__instances.add(self)
def _sigint_fix(self):
"""Continuously re-registers itself to be called every 250ms so that Ctrl+C is able to exit tk's mainloop"""
self.TKroot.after(250, self._sigint_fix)
def finalize(self):
super().finalize()
FinishInitMixin.finish_init_all()
self.TKroot.after(250, self._sigint_fix)
if (callback := self._finalize_callback) is not None:
callback()
return self
Finalize = finalize
@classmethod
def unregister_sigint_handler(cls):
if cls.__registered_sigint_handler:
signal.signal(signal.SIGINT, signal.SIG_DFL)
cls.__registered_sigint_handler = False
@classmethod
def register_sigint_handler(cls):
log.debug('Registering Window._handle_sigint to handle SIGINT')
signal.signal(signal.SIGINT, Window._handle_sigint)
cls.__registered_sigint_handler = True
@classmethod
def _handle_sigint(cls, *args):
"""
With just the _sigint_fix loop, the tkinter stdlib python code ignores SIGINT - this is required to actually
handle it immediately.
"""
for inst in cls.__instances:
try:
inst.write_event_value(None, None)
except AttributeError:
pass
def is_maximized(self) -> bool:
return self.TKroot.state() == 'zoomed'
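# Editorial usage sketch (layout/callback below are hypothetical):
#   window = Window('Demo', layout, finalize_callback=lambda: log.info('ready'))
#   window.finalize()  # starts the 250ms SIGINT keep-alive, then runs the callback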
| [
"dskrypa@gmail.com"
] | dskrypa@gmail.com |
ab8fca3ba9b9481bf76c0f4f3e210cfeb9dd2c62 | e42a61b7be7ec3412e5cea0ffe9f6e9f34d4bf8d | /a10sdk/core/slb/slb_health_stat_oper.py | d25b96bf06dca0d98f8468c55a6dbd2e3769db38 | [
"Apache-2.0"
] | permissive | amwelch/a10sdk-python | 4179565afdc76cdec3601c2715a79479b3225aef | 3e6d88c65bd1a2bf63917d14be58d782e06814e6 | refs/heads/master | 2021-01-20T23:17:07.270210 | 2015-08-13T17:53:23 | 2015-08-13T17:53:23 | 40,673,499 | 0 | 0 | null | 2015-08-13T17:51:35 | 2015-08-13T17:51:34 | null | UTF-8 | Python | false | false | 3,562 | py | from a10sdk.common.A10BaseClass import A10BaseClass
class HealthCheckList(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param status: {"type": "string", "format": "string"}
:param retries: {"type": "number", "format": "number"}
:param down_state: {"type": "number", "format": "number"}
:param up_retries: {"type": "number", "format": "number"}
:param down_cause: {"type": "number", "format": "number"}
:param partition_id: {"type": "number", "format": "number"}
:param up_cause: {"type": "number", "format": "number"}
:param ip_address: {"type": "string", "format": "string"}
:param total_retry: {"type": "number", "format": "number"}
:param health_monitor: {"type": "string", "format": "string"}
:param port: {"type": "string", "format": "string"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "health-check-list"
self.DeviceProxy = ""
self.status = ""
self.retries = ""
self.down_state = ""
self.up_retries = ""
self.down_cause = ""
self.partition_id = ""
self.up_cause = ""
self.ip_address = ""
self.total_retry = ""
self.health_monitor = ""
self.port = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class Oper(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param health_check_list: {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"status": {"type": "string", "format": "string"}, "retries": {"type": "number", "format": "number"}, "down-state": {"type": "number", "format": "number"}, "up-retries": {"type": "number", "format": "number"}, "down-cause": {"type": "number", "format": "number"}, "partition-id": {"type": "number", "format": "number"}, "up-cause": {"type": "number", "format": "number"}, "ip-address": {"type": "string", "format": "string"}, "total-retry": {"type": "number", "format": "number"}, "health-monitor": {"type": "string", "format": "string"}, "optional": true, "port": {"type": "string", "format": "string"}}}]}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "oper"
self.DeviceProxy = ""
self.health_check_list = []
for keys, value in kwargs.items():
setattr(self,keys, value)
class HealthStat(A10BaseClass):
"""Class Description::
Operational Status for the object health-stat.
Class health-stat supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/slb/health-stat/oper`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "health-stat"
self.a10_url="/axapi/v3/slb/health-stat/oper"
self.DeviceProxy = ""
self.oper = {}
for keys, value in kwargs.items():
setattr(self,keys, value)
| [
"doug@parksidesoftware.com"
] | doug@parksidesoftware.com |
36121fdf1958177b680c4adf4f0ba290b78f8c7d | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_135/1102.py | f45a308ef04109b1a46b0769d4274aa0069c4829 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 813 | py | import numpy as np
import sys
#dat=open("small.in").readlines()
dat=open("A-small-attempt1.in").readlines()
ntests=int(dat[0])
i=1
for j in xrange(ntests):
ans0=int(dat[i].strip('\n'))
i=i+1
grid0=np.array([[int(y) for y in x.strip('\n').split(' ')] for x in dat[i:i+4]])
i=i+4
ans1=int(dat[i].strip('\n'))
i=i+1
grid1=np.array([[int(y) for y in x.strip('\n').split(' ')] for x in dat[i:i+4]])
i=i+4
 # Possibilities after the first answer
 first_row=grid0[ans0-1,]
 # Possibilities after the second answer
 second_row=grid1[ans1-1,]
 # Which leaves...
both=np.intersect1d(first_row,second_row)
#print first_row,second_row,both
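 # Exactly one card in both rows: that is the chosen card; none: the
 # volunteer cheated; more than one: the magician cannot decide.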
if len(both)==1:
print "Case #%d: %d"%(j+1,both[0])
elif len(both)==0:
print "Case #%d: Volunteer cheated!"%(j+1)
else:
print "Case #%d: Bad magician!"%(j+1)
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
1187703e378de2de9f8a1d7635d3f3518db4f20b | 16136f6f9578358ad6ff00101831978d20a43926 | /bhch01/bhch01exrc04.py | 01f8e5664a74cd9e694bf4ac3f433ca4f0359b92 | [] | no_license | Yaachaka/pyPractice1 | 567c0f8e62cb4f6bff66f1f50672a2ffbc57eeee | fcd4deda3d1094c91ef228b36dfb6124cfa86a8b | refs/heads/main | 2023-06-15T17:14:59.697340 | 2021-07-07T05:01:20 | 2021-07-07T05:01:20 | 331,349,117 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 50 | py | print('(512-282)/(47.48+5)=',(512-282)/(47*48+5))
| [
"rosaarjuna@gmail.com"
] | rosaarjuna@gmail.com |
b0cf7d3b3f1d9800be47e1905e36043ddd16474c | febcab8c3bbcccfcaa18d2168353a7897a35fe80 | /bliski_publikator/institutions/migrations/0006_institution_monitorings.py | 909ae2d96f0d4b90900aa9f003ddf6033efe5b69 | [
"MIT",
"BSD-3-Clause"
] | permissive | watchdogpolska/bliski_publikator | 256d6e4e24bb7ea9821c409b491dd24e70755082 | f67ec8edf16f803ceef1a1d1f5a2b4699117895c | refs/heads/master | 2020-12-25T17:00:27.555894 | 2018-08-13T20:02:51 | 2018-08-13T20:02:51 | 55,450,877 | 1 | 3 | MIT | 2018-08-13T20:02:52 | 2016-04-04T22:46:57 | Python | UTF-8 | Python | false | false | 632 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-05-09 00:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('monitorings', '0008_auto_20160509_0031'),
('institutions', '0005_remove_institution_monitorings'),
]
operations = [
migrations.AddField(
model_name='institution',
name='monitorings',
field=models.ManyToManyField(blank=True, through='monitorings.MonitoringInstitution', to='monitorings.Monitoring', verbose_name='Monitorings'),
),
]
| [
"naczelnik@jawnosc.tk"
] | naczelnik@jawnosc.tk |
0b9567943df58092230d0f1d716d86717c9d7c22 | aef9d6b8bb21957fa8b2235872bca51f64e7b5ff | /django101/todoapp/migrations/0001_initial.py | 0b50bc857d2c64737fe726df40e616c51bf93192 | [] | no_license | dreadlordow/Softuni-Python-Web | 3cf9cc234960bb47f1c3c2a91a1a80d0fc499fd6 | 784faccbe15023536917d610384222d839a63bae | refs/heads/master | 2023-08-28T19:39:57.149514 | 2021-02-23T16:28:55 | 2021-02-23T16:28:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 589 | py | # Generated by Django 3.1.5 on 2021-01-16 22:49
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Todo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=30)),
('description', models.TextField()),
('is_done', models.BooleanField()),
],
),
]
| [
"georgipavlov1913@gmail.com"
] | georgipavlov1913@gmail.com |
617d539ddc913989973f9e820c92c115d270af23 | a9e15e6bdaa45c9fdd667f5f58a537a44d5bd67c | /diy/4.py | 53349c084629084a5da41dc72bd7751b99e6f184 | [] | no_license | tawateer/leetcode | 782f4f8b4a0730ec990d3c5d0e3d80cc4792f0b3 | 0ec784dc1da2577b823977fd858f4d55a059f327 | refs/heads/master | 2020-04-22T03:31:02.668858 | 2019-10-25T09:20:31 | 2019-10-25T09:20:31 | 170,089,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 505 | py | #!/bin/env python
# -*- coding:utf-8 -*-
"""
Among the numbers 1 to n, exactly one number m appears twice; every other number appears exactly once. Find that number.
"""
class Solution(object):
def findDuplicate(self, nums):
target = 0
for i in nums:
target ^= i
for i in range(1, len(nums)):
target ^= i
return target
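# XOR cancels pairs (a ^ a == 0), so folding the list together with 1..n
# leaves only the duplicated value; here n == len(nums) - 1.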
s = Solution()
print s.findDuplicate([1, 2, 3, 4, 4])
print s.findDuplicate([1, 2, 2, 3, 4])
| [
"liningning@wandoujia.com"
] | liningning@wandoujia.com |
05449101e333cfaae259ede36d115e888e4a2ab9 | f20e3f75644ce8eb718c22ac9800e41aa4da90dc | /round C/wall.py | e3c88e8292dadbaeef7263246467efc199a96305 | [] | no_license | rocket3989/KickStart2020 | 7de504d88c00c86e1d394a797f01d2682826e4bf | 513c4e6305b31e16350715840e35903b3fcdd89a | refs/heads/master | 2021-04-13T02:38:44.703426 | 2020-09-28T21:02:25 | 2020-09-28T21:02:25 | 249,129,624 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,375 | py | from collections import defaultdict
for tc in range(int(input())):
R, C = [int(x) for x in input().split()]
wall = []
for i in range(R):
wall.append(input().strip())
above = defaultdict(set)
below = defaultdict(set)
for r in range(R):
for c in range(C):
el = wall[r][c]
if r > 0:
if wall[r - 1][c] != el:
above[el].add(wall[r - 1][c])
if r < R - 1:
if wall[r + 1][c] != el:
below[el].add(wall[r + 1][c])
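    # Kahn-style topological pass from the bottom row up: a letter may be
    # placed once everything in below[letter] is placed; above[] seeds the
    # newly unblocked candidates.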
candidates = set(wall[-1])
order = []
placed = set()
while candidates:
for candidate in list(candidates):
for other in below[candidate]:
if other not in placed:
candidates.remove(candidate)
break
else:
order.append(candidate)
candidates.remove(candidate)
placed.add(candidate)
for val in above[candidate]:
candidates.add(val)
if len(order) == len(below):
print("Case #{}: {}".format(tc + 1, ''.join(order)))
else:
print("Case #{}: {}".format(tc + 1, -1))
| [
"rocket3989@gmail.com"
] | rocket3989@gmail.com |
19fb49bd9b20c0ef2ec5647f9caaecd108159748 | 090a4e026addc9e78ed6118f09fd0d7d4d517857 | /graph_objs/ohlc/_hoverlabel.py | 52a6fb8ffafbaae5884b9a8998c904850d90156e | [
"MIT"
] | permissive | wwwidonja/new_plotly | 0777365e53ea7d4b661880f1aa7859de19ed9b9a | 1bda35a438539a97c84a3ab3952e95e8848467bd | refs/heads/master | 2023-06-04T19:09:18.993538 | 2021-06-10T18:33:28 | 2021-06-10T18:33:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,655 | py | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Hoverlabel(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "ohlc"
_path_str = "ohlc.hoverlabel"
_valid_props = {
"align",
"alignsrc",
"bgcolor",
"bgcolorsrc",
"bordercolor",
"bordercolorsrc",
"font",
"namelength",
"namelengthsrc",
"split",
}
# align
# -----
@property
def align(self):
"""
Sets the horizontal alignment of the text content within hover
label box. Has an effect only if the hover label text spans
more two or more lines
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'right', 'auto']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["align"]
@align.setter
def align(self, val):
self["align"] = val
# alignsrc
# --------
@property
def alignsrc(self):
"""
Sets the source reference on Chart Studio Cloud for align .
The 'alignsrc' property must be specified as a string or
as a new_plotly.grid_objs.Column object
Returns
-------
str
"""
return self["alignsrc"]
@alignsrc.setter
def alignsrc(self, val):
self["alignsrc"] = val
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the background color of the hover labels for this trace
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
# bgcolorsrc
# ----------
@property
def bgcolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for bgcolor .
The 'bgcolorsrc' property must be specified as a string or
as a new_plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bgcolorsrc"]
@bgcolorsrc.setter
def bgcolorsrc(self, val):
self["bgcolorsrc"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the border color of the hover labels for this trace.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
# bordercolorsrc
# --------------
@property
def bordercolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
bordercolor .
The 'bordercolorsrc' property must be specified as a string or
as a new_plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bordercolorsrc"]
@bordercolorsrc.setter
def bordercolorsrc(self, val):
self["bordercolorsrc"] = val
# font
# ----
@property
def font(self):
"""
Sets the font used in hover labels.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`new_plotly.graph_objs.ohlc.hoverlabel.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
colorsrc
Sets the source reference on Chart Studio Cloud
for color .
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
for family .
size
sizesrc
Sets the source reference on Chart Studio Cloud
for size .
Returns
-------
new_plotly.graph_objs.ohlc.hoverlabel.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
# namelength
# ----------
@property
def namelength(self):
"""
Sets the default length (in number of characters) of the trace
name in the hover labels for all traces. -1 shows the whole
name regardless of length. 0-3 shows the first 0-3 characters,
and an integer >3 will show the whole name if it is less than
that many characters, but if it is longer, will truncate to
`namelength - 3` characters and add an ellipsis.
The 'namelength' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [-1, 9223372036854775807]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["namelength"]
@namelength.setter
def namelength(self, val):
self["namelength"] = val
# namelengthsrc
# -------------
@property
def namelengthsrc(self):
"""
Sets the source reference on Chart Studio Cloud for namelength
.
The 'namelengthsrc' property must be specified as a string or
as a new_plotly.grid_objs.Column object
Returns
-------
str
"""
return self["namelengthsrc"]
@namelengthsrc.setter
def namelengthsrc(self, val):
self["namelengthsrc"] = val
# split
# -----
@property
def split(self):
"""
Show hover information (open, close, high, low) in separate
labels.
The 'split' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["split"]
@split.setter
def split(self, val):
self["split"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans more two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
align .
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
bgcolor .
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
namelength .
split
Show hover information (open, close, high, low) in
separate labels.
"""
def __init__(
self,
arg=None,
align=None,
alignsrc=None,
bgcolor=None,
bgcolorsrc=None,
bordercolor=None,
bordercolorsrc=None,
font=None,
namelength=None,
namelengthsrc=None,
split=None,
**kwargs
):
"""
Construct a new Hoverlabel object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`new_plotly.graph_objs.ohlc.Hoverlabel`
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans more two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
align .
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
bgcolor .
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
namelength .
split
Show hover information (open, close, high, low) in
separate labels.
Returns
-------
Hoverlabel
"""
super(Hoverlabel, self).__init__("hoverlabel")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the new_plotly.graph_objs.ohlc.Hoverlabel
constructor must be a dict or
an instance of :class:`new_plotly.graph_objs.ohlc.Hoverlabel`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("align", None)
_v = align if align is not None else _v
if _v is not None:
self["align"] = _v
_v = arg.pop("alignsrc", None)
_v = alignsrc if alignsrc is not None else _v
if _v is not None:
self["alignsrc"] = _v
_v = arg.pop("bgcolor", None)
_v = bgcolor if bgcolor is not None else _v
if _v is not None:
self["bgcolor"] = _v
_v = arg.pop("bgcolorsrc", None)
_v = bgcolorsrc if bgcolorsrc is not None else _v
if _v is not None:
self["bgcolorsrc"] = _v
_v = arg.pop("bordercolor", None)
_v = bordercolor if bordercolor is not None else _v
if _v is not None:
self["bordercolor"] = _v
_v = arg.pop("bordercolorsrc", None)
_v = bordercolorsrc if bordercolorsrc is not None else _v
if _v is not None:
self["bordercolorsrc"] = _v
_v = arg.pop("font", None)
_v = font if font is not None else _v
if _v is not None:
self["font"] = _v
_v = arg.pop("namelength", None)
_v = namelength if namelength is not None else _v
if _v is not None:
self["namelength"] = _v
_v = arg.pop("namelengthsrc", None)
_v = namelengthsrc if namelengthsrc is not None else _v
if _v is not None:
self["namelengthsrc"] = _v
_v = arg.pop("split", None)
_v = split if split is not None else _v
if _v is not None:
self["split"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
| [
"wwwidonja@gmail.com"
] | wwwidonja@gmail.com |
91c9505ee1c1e349d16a2b7cac17bfa2367e0f21 | 91d1a6968b90d9d461e9a2ece12b465486e3ccc2 | /mediastore-data_write_1/object_delete.py | dd3266bb35468f0cb8fd5427b86b4d182020811a | [] | no_license | lxtxl/aws_cli | c31fc994c9a4296d6bac851e680d5adbf7e93481 | aaf35df1b7509abf5601d3f09ff1fece482facda | refs/heads/master | 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,258 | py | #!/usr/bin/python
# -*- codding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_one_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/mediastore-data/delete-object.html
if __name__ == '__main__':
"""
describe-object : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/mediastore-data/describe-object.html
get-object : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/mediastore-data/get-object.html
put-object : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/mediastore-data/put-object.html
"""
parameter_display_string = """
# path : The path (including the file name) where the object is stored in the container. Format: <folder name>/<folder name>/<file name>
"""
add_option_dict = {}
#######################################################################
# parameter display string
add_option_dict["parameter_display_string"] = parameter_display_string
# ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
write_one_parameter("mediastore-data", "delete-object", "path", add_option_dict)
| [
"hcseo77@gmail.com"
] | hcseo77@gmail.com |
2832aa7a7a0496dee9f4feb0edf79292b112e86f | 69a43560780342b466360687099eb593de9fec93 | /test.py | 830e69dedc40bbdfdb7df194d4a0d9b276daa0ba | [] | no_license | JamCrumpet/email_generator | e35f7f99654a189f0a6aff832ca6207df13b6a4e | bdb96cd4b02069bce5e1d6d2e70e0233ec7cd71c | refs/heads/master | 2023-01-10T02:06:44.579407 | 2020-11-10T13:38:55 | 2020-11-10T13:38:55 | 287,095,516 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,849 | py | #filename = "female_first_names.txt"
#with open(filename) as file_object:
# lines = file_object.readlines()
#for line in lines:
# print(line.split())
#with open(filename,"r") as file_object:
# """ Removes numbers and percentage symbol from text file """
# contents = file_object.read()
# print(contents.replace(str(9),"").replace(str(8), "").replace(str(7),"").replace(str(6), "").replace(str(5), "")\
# .replace(str(4), "").replace(str(3), "").replace(str(2), "").replace(str(1), "").replace(str(0), "")\
# .replace("%", "").lstrip("\t").replace(" ", ""))
#class Female_email():
# """An email address with a female name"""
# def __init__(self,female_first_name,last_name,domain):
# self.female_first_name = rd_female_first_name
# self.last_name = rd_last_name
# self.domain = rd_domain
#self.rd1 = female_first_name + last_name + "@" + domain
#self.rd2 = female_first_name + "." + last_name + "@" + domain
# def random_femail_email(self):
# print(random.choice(self.rd1, self.rd2))
#print("Randomly generated email:")
#Female_email.random_femail_email()
#class Male_email():
# """An email address with a male name"""
# def __init___(self,male_first_name,last_name,domain):
# self.male_first_name = male_first_name
# self.last_name = last_name
# self.domain = domain
#print(rd_male_first_name + rd_last_name + "@" +rd_domain)
import pandas as pd
import random
# read CSV files and saves as dataframes
df_domains = pd.read_csv("domains.csv")
df_female_first_name = pd.read_csv("female_first_names.csv")
df_last_names = pd.read_csv("last_names.csv")
df_male_first_name = pd.read_csv("male_first_names.csv")
# extract necessary columns
column_domains = df_domains["domain"]
column_female_first_name = df_female_first_name["name"]
column_last_name = df_last_names["lastname"]
column_male_first_name = df_male_first_name["name"]
# pick random values from column
rd_domain = random.choice(column_domains)
rd_female_first_name = random.choice(column_female_first_name)
rd_last_name = random.choice(column_last_name)
rd_male_first_name = random.choice(column_male_first_name)
symbols = ["-", "_", "."]
# Random emails with female first name
rd_fe1 = rd_female_first_name + rd_last_name + "@" + rd_domain
rd_fe2 = rd_female_first_name + str(random.randrange(81,99)) + "@" + rd_domain
rd_fe3 = rd_female_first_name + random.choice(symbols) + rd_last_name + "@" + rd_domain
rd_fe = rd_fe1, rd_fe2, rd_fe3
# Random email with male first name
rd_me1 = rd_male_first_name + rd_last_name + "@" + rd_domain
rd_me2 = rd_male_first_name + str(random.randrange(81,99)) + "@" + rd_domain
rd_me3 = rd_male_first_name + random.choice(symbols) + rd_last_name + "@" + rd_domain
rd_me = rd_me1, rd_me2, rd_me3
print('''
Email Generator
==================
''')
length = input('email total?')
length = int(length)
print('\nhere are your emails:')
email_address = ''
for c in range(length):
    email_address += random.choice(rd_fe) + "\n"
print(email_address)
############
def female_email_generator():
    prompt = "Type y to generate an email / type quit to cancel"
    message = ""
    while message != "quit":
        message = input(prompt)
        if message != "quit":
            for email in rd_fe:
                print(random.choice(rd_fe))
female_email_generator()
| [
"noreply@github.com"
] | JamCrumpet.noreply@github.com |
c14e4ab6c989ae7577fcc2e88f867fe373ad43d9 | 0528a8b2cbdcb3f64ce8183aa04fe8a515f5801a | /libcms/settings.py | 5afb31eeba5e5724f544de83f18866ed7edc06ea | [] | no_license | wd5/talk.arbicon | 52c17578eaeaaeab0ae620cf101b2b07eb63b9d9 | 12199997447c051612c84dcb2febcde2386fcbb6 | refs/heads/master | 2021-01-17T23:37:22.793853 | 2012-06-04T06:54:56 | 2012-06-04T06:54:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,786 | py | # -*- coding: utf-8 -*-
import os
import sys
PROJECT_PATH = os.path.abspath(os.path.dirname(__file__)) + '/'
sys.path.insert(0, os.path.join(PROJECT_PATH, "apps"))
sys.path.insert(0, os.path.join(PROJECT_PATH, "vendors"))
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Moscow'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'ru-RU'
gettext = lambda s: s
LANGUAGES = (
('ru', gettext('Russian')),
('en', gettext('English')),
# ('tt', _('Tatar')),
)
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
# ('django.template.loaders.cached.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# )),
# 'django.template.loaders.eggs.Loader',
)
#TEMPLATE_LOADERS = (
# ('django.template.loaders.cached.Loader', (
# 'django.template.loaders.filesystem.Loader',
# 'django.template.loaders.app_directories.Loader',
# )),
# 'django.template.loaders.eggs.Loader',
# )
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.request',
#'django.contrib.messages.context_processors.messages',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# 'debug_toolbar.middleware.DebugToolbarMiddleware',
)
ROOT_URLCONF = 'libcms.urls'
AUTHENTICATION_BACKENDS = (
'arbicon.auth.ldap_auth_backend.LdapBackend',
'django.contrib.auth.backends.ModelBackend',
'guardian.backends.ObjectPermissionBackend',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
# 'django.contrib.admindocs',
# vendor apps
'mptt',
'guardian',
'debug_toolbar',
# cms apps
'core',
'index',
'accounts',
'pages',
'forum',
'arbicon',
'arbicon_polls',
)
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# guardian settings
ANONYMOUS_USER_ID = -1
LOGIN_REDIRECT_URL = "/"
DEBUG_TOOLBAR_PANELS = (
'debug_toolbar.panels.version.VersionDebugPanel',
'debug_toolbar.panels.timer.TimerDebugPanel',
'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel',
'debug_toolbar.panels.headers.HeaderDebugPanel',
'debug_toolbar.panels.request_vars.RequestVarsDebugPanel',
'debug_toolbar.panels.template.TemplateDebugPanel',
'debug_toolbar.panels.sql.SQLDebugPanel',
'debug_toolbar.panels.signals.SignalDebugPanel',
'debug_toolbar.panels.logger.LoggingPanel',
)
#LOCALE_INDEPENDENT_PATHS = (
# r'^/$',
#)
from local_settings import * | [
"dostovalov@gmail.com"
] | dostovalov@gmail.com |
d3d91f91295649651652951a0785ed15a2a47eb0 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /kiX7WjSFeTmBYcEgK_1.py | 44b53df8479d2eca5cb4445ea4294227621ff84c | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 955 | py | """
Create a function that takes an integer list and return the biggest between
**positive** sum, **negative** sum, or **0s** count. The major is understood
as the greatest absolute.
`l = [1,2,3,4,0,0,-3,-2]`, the function has to return `10`, because:
* Positive sum = 1+2+3+4 = 10
* Negative sum = (-3)+(-2) = -5
* 0s count = 2 (there are two zeros in list)
### Examples
major_sum([1, 2, 3, 4, 0, 0, -3, -2]) ➞ 10
major_sum([-4, -8, -12, -3, 4, 7, 1, 3, 0, 0, 0, 0]) ➞ -27
major_sum([0, 0, 0, 0, 0, 1, 2, -3]) ➞ 5
# Because -3 < 1+2 < 0sCount = 5
### Notes
* All numbers are integers.
* There aren't empty lists.
* All tests are made to return only one value.
"""
def major_sum(lst):
pos = sum([i for i in lst if i > 0])
neg = sum([i for i in lst if i < 0])
zero = lst.count(0)
  if abs(neg) > pos and abs(neg) > zero:
return neg
else:
return max(pos, zero)
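# Quick self-checks taken from the examples in the docstring above:
if __name__ == '__main__':
  assert major_sum([1, 2, 3, 4, 0, 0, -3, -2]) == 10
  assert major_sum([-4, -8, -12, -3, 4, 7, 1, 3, 0, 0, 0, 0]) == -27
  assert major_sum([0, 0, 0, 0, 0, 1, 2, -3]) == 5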
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
3e49c8234f79b043d8c5e6726a96bc29aeb2f000 | 9ee6068d08a416a2eff7d05095fc664b54416b94 | /aliyun-python-sdk-imm/aliyunsdkimm/request/v20170906/GetOfficePreviewURLRequest.py | 78c2487e35064842b98a5bc881416a2897bc15a2 | [
"Apache-2.0"
] | permissive | zhangjing57/aliyun-openapi-python-sdk | 4ea996e1065a697e87c661cd6389072f7611415c | c633e383faa8920fa94261392a7cfa94bc5c11fe | refs/heads/master | 2021-06-02T06:58:30.553558 | 2020-04-09T08:22:45 | 2020-04-09T08:22:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,599 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class GetOfficePreviewURLRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'imm', '2017-09-06', 'GetOfficePreviewURL','imm')
def get_SrcType(self):
return self.get_query_params().get('SrcType')
def set_SrcType(self,SrcType):
self.add_query_param('SrcType',SrcType)
def get_Project(self):
return self.get_query_params().get('Project')
def set_Project(self,Project):
self.add_query_param('Project',Project)
def get_UseHTTPS(self):
return self.get_query_params().get('UseHTTPS')
def set_UseHTTPS(self,UseHTTPS):
self.add_query_param('UseHTTPS',UseHTTPS)
def get_SrcUri(self):
return self.get_query_params().get('SrcUri')
def set_SrcUri(self,SrcUri):
self.add_query_param('SrcUri',SrcUri) | [
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
1e6a6bbec7aebbcf5b92eda2fb644ada06f33d6e | 9ee3ef54384da3e08ad367dfc2a350d487caf0ee | /home/migrations/0003_auto_20210101_1212.py | 27a829564276bb52f40673aa392b6a1d46f6915d | [] | no_license | crowdbotics-apps/test-app-for-segmen-17778 | c9cc0d1d5baffeb456e96a76c6ffdcfe642efeb8 | 9b012d3e3c018871b1a86fc4935bace98b962c79 | refs/heads/master | 2023-02-08T13:06:52.480483 | 2021-01-04T13:32:52 | 2021-01-04T13:32:52 | 325,950,256 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 831 | py | # Generated by Django 2.2.17 on 2021-01-01 12:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("home", "0002_load_initial_data"),
]
operations = [
migrations.AddField(
model_name="homepage",
name="model_field_with_long_text_to_display_username",
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AddField(
model_name="homepage",
name="model_field_with_long_text_to_display_username_email",
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AddField(
model_name="homepage",
name="short_name",
field=models.CharField(blank=True, max_length=256, null=True),
),
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
9edeee0e781e816f4161d110dfa715c3bc794f1d | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/infra/rsresdatetimeformat.py | fd1c1cce839f1e26e0a2c597d8372e7bdc6f7023 | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,472 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class RsResDatetimeFormat(Mo):
"""
A source relation to the date/time format policy.
"""
meta = SourceRelationMeta("cobra.model.infra.RsResDatetimeFormat", "cobra.model.datetime.Format")
meta.cardinality = SourceRelationMeta.ONE_TO_ONE
meta.moClassName = "infraRsResDatetimeFormat"
meta.rnFormat = "rsresDatetimeFormat"
meta.category = MoCategory.RELATIONSHIP_TO_LOCAL
meta.label = "Datetime Format"
meta.writeAccessMask = 0x100000000001
meta.readAccessMask = 0x100000000001
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.parentClasses.add("cobra.model.infra.Infra")
meta.superClasses.add("cobra.model.reln.Inst")
meta.superClasses.add("cobra.model.reln.To")
meta.rnPrefixes = [
('rsresDatetimeFormat', False),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "forceResolve", "forceResolve", 107, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = True
prop.defaultValueStr = "yes"
prop._addConstant("no", None, False)
prop._addConstant("yes", None, True)
meta.props.add("forceResolve", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "rType", "rType", 106, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 1
prop.defaultValueStr = "mo"
prop._addConstant("local", "local", 3)
prop._addConstant("mo", "mo", 1)
prop._addConstant("service", "service", 2)
meta.props.add("rType", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "state", "state", 103, PropCategory.REGULAR)
prop.label = "State"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "unformed"
prop._addConstant("cardinality-violation", "cardinality-violation", 5)
prop._addConstant("formed", "formed", 1)
prop._addConstant("invalid-target", "invalid-target", 4)
prop._addConstant("missing-target", "missing-target", 2)
prop._addConstant("unformed", "unformed", 0)
meta.props.add("state", prop)
prop = PropMeta("str", "stateQual", "stateQual", 104, PropCategory.REGULAR)
prop.label = "State Qualifier"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "none"
prop._addConstant("default-target", "default-target", 2)
prop._addConstant("mismatch-target", "mismatch-target", 1)
prop._addConstant("none", "none", 0)
meta.props.add("stateQual", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "tCl", "tCl", 15707, PropCategory.REGULAR)
prop.label = "Target-class"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 4534
prop.defaultValueStr = "datetimeFormat"
prop._addConstant("datetimeFormat", None, 4534)
prop._addConstant("unspecified", "unspecified", 0)
meta.props.add("tCl", prop)
prop = PropMeta("str", "tDn", "tDn", 15706, PropCategory.REGULAR)
prop.label = "Target-dn"
prop.isConfig = True
prop.isAdmin = True
meta.props.add("tDn", prop)
prop = PropMeta("str", "tType", "tType", 105, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 1
prop.defaultValueStr = "mo"
prop._addConstant("all", "all", 2)
prop._addConstant("mo", "mo", 1)
prop._addConstant("name", "name", 0)
meta.props.add("tType", prop)
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"bkhoward@live.com"
] | bkhoward@live.com |
3309039c811223dff99454e678e10f02a554f17c | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/59/usersdata/159/64269/submittedfiles/testes.py | 2ada4d068b2b66630f6bf6f34567c3d2085f78ca | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,875 | py | # -*- coding: utf-8 -*-
import numpy as np
#This function takes the degrees, minutes and seconds of a sexagesimal
#angle and returns the angle in decimal degrees.
def angulo(g,m,s):
angulom=m/60
angulos=s/3600
soma=g+angulom+angulos
return soma
'''
This function converts decimal angles, stored in a list, into sexagesimal
angles, grouping them into a matrix with 3 columns, which are respectively
the degrees, minutes and seconds of each angle, with the n angles
distributed over the rows.
'''
def graus (azi):
a=np.zeros((len(azi),3))
for i in range (0,len(azi),1):
graus=int(azi[i])
b=azi[i]-graus
c=b*60
minutos=int(c)
j=c-minutos
segundos=j*60
a[i,0]=graus
a[i,1]=minutos
a[i,2]=segundos
return a
#This function computes the azimuth at every point using the formula
#Az_i = Az_(i-1) + De_i.
def azimute (angulo,dei):
azi=[]
for i in range (0,len(dei),1):
azimute=angulo+dei[i]
azi.append(azimute)
angulo=azimute
return (azi)
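# Worked example (made-up numbers): azimute(10.0, [5.0, 3.0]) returns
# [15.0, 18.0], since 10 + 5 = 15 and 15 + 3 = 18.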
g=int(input('Degrees of the first azimuth:'))
m=int(input('Minutes of the first azimuth:'))
s=int(input('Seconds of the first azimuth:'))
n=int(input('Number of deflections:'))
#A list is created to store the deflections.
dei=[]
for i in range (0,n,1):
    deflexao=float(input('Deflection:'))
    dei.append(deflexao)
#After reading the inputs, convert the first azimuth to decimal degrees.
primeiroazi=(angulo(g,m,s))
#Applying the first azimuth and the list of deflections to the function
#gives a list with the (decimal) azimuths at every point.
azimutes=azimute(primeiroazi,dei)
#Turn the list of (decimal) azimuth values into a matrix with 3 columns,
#which are degrees, minutes and seconds, respectively.
print(graus(azimutes))
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
2258e8bf2faf8c3ae5d63dac8669d6dd32476e7a | 9c19350ad4ab5e41d9bc6c627d623f0edd8325c5 | /aleph/tests/test_collections_api.py | f734c01585f2ae4014ad72f125f7748b1f3b18d9 | [
"MIT"
] | permissive | gavinrozzi/aleph | f515a70fa87d1f19fe7288a5ac0b398a71dabfd5 | a8e3d10ec34b0d0a05b4daf3fdd2d09b96928b35 | refs/heads/master | 2020-03-21T00:56:14.108536 | 2018-08-17T13:10:12 | 2018-08-17T13:10:12 | 137,916,214 | 0 | 0 | MIT | 2018-08-17T13:10:13 | 2018-06-19T16:15:29 | Python | UTF-8 | Python | false | false | 4,174 | py | import json
from aleph.core import db
from aleph.model import Entity
from aleph.tests.util import TestCase
class CollectionsApiTestCase(TestCase):
def setUp(self):
super(CollectionsApiTestCase, self).setUp()
self.rolex = self.create_user(foreign_id='user_3')
self.col = self.create_collection(
label='Test Collection',
foreign_id='test_coll_entities_api',
category='leak',
countries=[]
)
self.ent = Entity.create({
'schema': 'Person',
'name': 'Winnie the Pooh',
}, self.col)
db.session.add(self.ent)
db.session.commit()
def test_index(self):
res = self.client.get('/api/2/collections')
assert res.status_code == 200, res
assert res.json['total'] == 0, res.json
_, headers = self.login(is_admin=True)
res = self.client.get('/api/2/collections',
headers=headers)
assert res.status_code == 200, res
assert res.json['total'] == 1, res.json
def test_view(self):
res = self.client.get('/api/2/collections/%s' % self.col.id)
assert res.status_code == 403, res
_, headers = self.login(is_admin=True)
res = self.client.get('/api/2/collections/%s' % self.col.id,
headers=headers)
assert res.status_code == 200, res
assert 'test_coll' in res.json['foreign_id'], res.json
assert 'Winnie' not in res.json['label'], res.json
def test_sitemap(self):
self.update_index()
url = '/api/2/collections/%s/sitemap.xml' % self.col.id
res = self.client.get(url)
assert res.status_code == 403, res
self.grant_publish(self.col)
res = self.client.get(url)
assert res.status_code == 200, res
data = res.data.decode('utf-8')
assert self.ent.id in data, data
def test_rdf(self):
url = '/api/2/collections/%s/rdf' % self.col.id
res = self.client.get(url)
assert res.status_code == 403, res
self.grant_publish(self.col)
res = self.client.get(url)
assert res.status_code == 200, res
def test_update_valid(self):
_, headers = self.login(is_admin=True)
url = '/api/2/collections/%s' % self.col.id
res = self.client.get(url,
headers=headers)
assert res.status_code == 200, res
data = res.json
data['label'] = 'Collected Collection'
res = self.client.post(url,
data=json.dumps(data),
headers=headers,
content_type='application/json')
assert res.status_code == 200, res.json
assert 'Collected' in res.json['label'], res.json
def test_update_no_label(self):
_, headers = self.login(is_admin=True)
url = '/api/2/collections/%s' % self.col.id
res = self.client.get(url, headers=headers)
data = res.json
data['label'] = ''
res = self.client.post(url,
data=json.dumps(data),
headers=headers,
content_type='application/json')
assert res.status_code == 400, res.json
res = self.client.get(url, headers=headers)
data = res.json
data['category'] = 'banana'
res = self.client.post(url,
data=json.dumps(data),
headers=headers,
content_type='application/json')
assert res.status_code == 400, res.json
def test_delete(self):
_, headers = self.login(is_admin=True)
url = '/api/2/collections/%s' % self.col.id
res = self.client.get(url, headers=headers)
assert res.status_code == 200, res
res = self.client.delete(url,
headers=headers)
assert res.status_code == 204, res
res = self.client.get(url,
headers=headers)
assert res.status_code == 404, res
| [
"friedrich@pudo.org"
] | friedrich@pudo.org |
af5920639862893c22731a5bd2fc1174a0685376 | 41b77a1a17244a727aa6acd95e91e8c674986049 | /leagueOfDrivers_BE/wx_league/migrations/0025_auto_20180812_0856.py | 11875c3038b5b09e6392b1b9a149f0bbd957fca7 | [] | no_license | DataSecretbase/Renjiu | 24424ca1742a3987a395bc5da54afa1e7f34fc84 | aa90d58d92d0c1936b0ee23e4f9c970135b480d5 | refs/heads/master | 2022-12-08T05:28:09.192623 | 2019-03-10T09:44:19 | 2019-03-10T09:44:19 | 140,274,553 | 1 | 2 | null | 2022-12-08T02:21:56 | 2018-07-09T11:15:07 | Python | UTF-8 | Python | false | false | 464 | py | # Generated by Django 2.1 on 2018-08-12 08:56
from django.db import migrations, models
import wx_league.models
class Migration(migrations.Migration):
dependencies = [
('wx_league', '0024_auto_20180812_0845'),
]
operations = [
migrations.AlterField(
model_name='icon',
name='display_pic',
field=models.ImageField(upload_to=wx_league.models.filepath, verbose_name='icon 对应'),
),
]
| [
"2144799613@qq.com"
] | 2144799613@qq.com |
39b5f1e225edc865f4828bc8ca4734edd479d6ce | 92f21431bb65074757b76ec41b2f5fa4b445c566 | /estomagordo-python3/day_6a.py | 3f718401f3d9615e5aaea6aabe160a5921626ca3 | [
"Apache-2.0"
] | permissive | kodsnack/advent_of_code_2019 | f45c6235ef7ddf8ee177be3069eddfb18b64d05c | b5478e4ce4a7cb223bbb61a8f7322f6e0f68684e | refs/heads/master | 2023-03-08T18:02:11.962777 | 2023-03-01T11:20:14 | 2023-03-01T11:20:14 | 223,724,445 | 9 | 53 | Apache-2.0 | 2023-03-01T11:20:15 | 2019-11-24T10:08:42 | Python | UTF-8 | Python | false | false | 624 | py | import helpers
import re
from heapq import heappop, heappush
from collections import Counter, defaultdict
def solve(d):
    # Map every orbiting object to the object it orbits directly.
    graph = {}

    for line in d:
        a, b = line.split(')')
        graph[b] = a

        if a not in graph:
            graph[a] = 'STOP'

    count = 0

    # Walk from each object up towards the root; every hop is one direct or
    # indirect orbit, so the answer is the total number of hops.
    for node in graph.keys():
        while node in graph and graph[node] in graph:
            count += 1
            node = graph[node]

    return count
def read_and_solve():
with open('input_6.txt') as f:
data = [line.rstrip() for line in f]
return solve(data)
if __name__ == '__main__':
print(read_and_solve()) | [
"christofer.ohlsson@gmail.com"
] | christofer.ohlsson@gmail.com |
59a171100a3c7d5531c4504f41806319dd918ba9 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2370/60605/242607.py | ce8e14d4dde4ff225770cb30a7777d60cedc5d54 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | n = int(input())
t = n
res = ""
while t != 0:
    remain = t % -2
    t //= -2
    # Python's % follows the divisor's sign, so remain is 0 or -1 here;
    # turn a -1 digit into 1 by borrowing one from the next place.
    if remain < 0:
        t += 1
        remain += 2
    # Remainders come out least-significant digit first, so prepend.
    res = str(remain) + res
print(res if res else "0") | [
"1069583789@qq.com"
] | 1069583789@qq.com |
f67f8316bc3b50611487d6432f716327ffa6bef6 | 7f6b06334e6556ac91a19d410149129217070d5e | /abaqus/abaquslib/abaqus_functions.py | dc4a4e53975b533f7a58b35fbcea3b5a3a90b3a4 | [] | no_license | saullocastro/programming | a402e5b7c34c80f0ce22e8a29ce7975b263f19c3 | 499938a566348649218dc3c0ec594a4babe4f1a4 | refs/heads/master | 2021-01-20T10:42:50.840178 | 2020-08-26T07:56:35 | 2020-08-26T07:56:35 | 21,904,820 | 11 | 2 | null | 2020-08-26T07:56:36 | 2014-07-16T14:47:32 | Python | UTF-8 | Python | false | false | 13,372 | py | r"""
=========================================================
Utilities Abaqus (:mod:`abaquslib.abaqus_functions`)
=========================================================
.. currentmodule:: abaquslib.abaqus_functions
Includes all utilities functions that must be executed from Abaqus.
"""
import numpy as np

from constants import (TOL, FLOAT, COLORS, COLOR_WHINE, COLOR_DARK_BLUE,
                       COLOR_BLACK)
# Sibling helper modules (assumed to sit next to 'constants' above);
# create_sketch_plane() below relies on utils.cyl2rec() and geom.Plane.
import utils
import geom
def print_png(filename):
"""Print a png file from the current viewport
Parameters
----------
filename : str
The name of the output png file.
"""
from abaqus import session
from abaqusConstants import PNG
viewport=session.viewports[session.currentViewportName]
session.printToFile(fileName=filename,
format=PNG,
canvasObjects=(viewport,))
def set_default_view(cc):
"""Set a default view in order to compare figures from different models
Parameters
----------
cc : :class:`.ConeCyl` object
"""
    from abaqus import session
    from abaqusConstants import (USER_SPECIFIED, NODAL, COMPONENT, EXTRA_FINE,
                                 FREE, UNIFORM, CONTINUOUS, ON, OFF)
odb=cc.attach_results()
if not odb:
print 'No .odb file found for %s!' % cc.jobname
return
dtm=odb.rootAssembly.datumCsyses[
'ASSEMBLY__T-INSTANCECYLINDER-CSYSCYLINDER']
viewport=session.viewports[session.currentViewportName]
viewport.odbDisplay.basicOptions.setValues(
averageElementOutput=False, transformationType=USER_SPECIFIED,
datumCsys=dtm)
viewport.odbDisplay.setPrimaryVariable(
variableLabel='U',
outputPosition=NODAL,
refinement=(COMPONENT, 'U1'),)
    viewport.odbDisplay.basicOptions.setValues(averageElementOutput=True,
                                               curveRefinementLevel=EXTRA_FINE)
viewport.odbDisplay.commonOptions.setValues(visibleEdges=FREE,
deformationScaling=UNIFORM,
uniformScaleFactor=5)
viewport.odbDisplay.contourOptions.setValues(contourStyle=CONTINUOUS)
viewport.restore()
viewport.viewportAnnotationOptions.setValues(compass=OFF)
viewport.viewportAnnotationOptions.setValues(triad=ON)
viewport.viewportAnnotationOptions.setValues(title=OFF)
viewport.viewportAnnotationOptions.setValues(state=OFF)
viewport.viewportAnnotationOptions.setValues(legend=ON)
viewport.viewportAnnotationOptions.setValues(legendTitle=OFF)
viewport.viewportAnnotationOptions.setValues(legendBox=OFF)
viewport.viewportAnnotationOptions.setValues(
legendFont='-*-arial narrow-bold-r-normal-*-*-140-*-*-p-*-*-*')
viewport.viewportAnnotationOptions.setValues(
legendFont='-*-arial narrow-bold-r-normal-*-*-180-*-*-p-*-*-*')
viewport.viewportAnnotationOptions.setValues(legendPosition=(1, 99))
viewport.viewportAnnotationOptions.setValues(legendDecimalPlaces=1)
viewport.setValues(origin=(0.0, -1.05833435058594),
height=188.030563354492,
width=203.452590942383)
viewport.view.setValues(viewOffsetX=-2.724,
viewOffsetY=-52.6898,
cameraUpVector=(-0.453666, -0.433365, 0.778705),
nearPlane=1192.17,
farPlane=2323.39,
width=750.942,
height=665.183,
cameraPosition=(1236.44, 1079.87, 889.94),
cameraTarget=(27.3027, -54.758, 306.503))
def edit_keywords(mod, text, before_pattern=None, insert=False):
"""Edit the keywords to add commands not available in Abaqus CAE
Parameters
----------
mod : Abaqus Model object
The model for which the keywords will be edited.
text : str
The text to be included.
before_pattern : str, optional
One pattern used to find where to put the given text.
insert : bool, optional
Insert the text instead of replacing it.
"""
mod.keywordBlock.synchVersions(storeNodesAndElements=False)
sieBlocks=mod.keywordBlock.sieBlocks
if before_pattern is None:
index=len(sieBlocks) - 2
else:
index=None
for i in range(len(sieBlocks)):
sieBlock=sieBlocks[i]
if sieBlock.find(before_pattern) > -1:
index=i-1
break
if index is None:
print 'WARNING - *edit_keywords failed !'
print ' %s pattern not found !' % before_pattern
#TODO better error handling here...
if insert:
mod.keywordBlock.insert(index, text)
else:
mod.keywordBlock.replace(index, text)
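# Illustrative call (the keyword card and pattern below are made-up examples,
# not part of this module): insert an *IMPERFECTION card just before the
# first step definition:
#
#     edit_keywords(mod, '*IMPERFECTION, FILE=imp_file, STEP=1',
#                   before_pattern='*Step', insert=True)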
def create_composite_layup(name, stack, plyts, mat_names, region, part,
part_csys, symmetric=False, scaling_factor=1.,
axis_normal=2):
r"""Creates a composite layup
Parameters
----------
name : str
Name of the new composite layup.
stack : list
        Stacking sequence represented by a list of orientations in degrees.
        The stacking sequence starts inwards and ends outwards. The 0 degree
angle is along the axial direction and the angles are measured using
the right-hand rule with the normal direction being normal to the
shell surface pointing outwards.
plyts : list
List containing the ply thicknesses.
mat_names : list
List containing the material name for each ply.
region : an Abaqus Region object
The region consisting of geometric faces, where this laminate will be
assigned to.
part : an Abaqus part Object
A part object where the layup will be created.
part_csys : a valid Datum object
The cylindrical coordinate system of the part object.
symmetric : bool, optional
A boolean telling whether the laminate is symmetric.
scaling_factor : float, optional
A scaling factor to be applied to each ply thickness. Used to apply
thickness imperfection in some cases.
axis_normal : int, optional
        The axis (1, 2 or 3) used as the reference orientation axis of the
        layup; any other value raises ``ValueError``.
"""
from abaqusConstants import (MIDDLE_SURFACE, FROM_SECTION, SHELL, ON, OFF,
DEFAULT, UNIFORM, SIMPSON, GRADIENT, SYSTEM, ROTATION_NONE,
AXIS_1, AXIS_2, AXIS_3, SPECIFY_THICKNESS, SPECIFY_ORIENT)
myLayup=part.CompositeLayup(name=name,
description='stack from inside to outside',
offsetType=MIDDLE_SURFACE,
                                symmetric=symmetric,  # pass the argument through instead of hard-coding False
thicknessAssignment=FROM_SECTION,
elementType=SHELL)
myLayup.Section(preIntegrate=OFF,
integrationRule=SIMPSON,
thicknessType=UNIFORM,
poissonDefinition=DEFAULT,
temperature=GRADIENT,
useDensity=OFF)
if axis_normal == 1:
axis = AXIS_1
elif axis_normal == 2:
axis = AXIS_2
elif axis_normal == 3:
axis = AXIS_3
else:
raise ValueError('Invalid value for `axis_normal`')
myLayup.ReferenceOrientation(orientationType=SYSTEM,
localCsys=part_csys,
fieldName='',
additionalRotationType=ROTATION_NONE,
angle=0.,
additionalRotationField='',
axis=axis)
#CREATING ALL PLIES
numIntPoints=3
if len(stack)==1:
numIntPoints=5
for i, angle in enumerate(stack):
plyt=plyts[i]
mat_name=mat_names[i]
myLayup.CompositePly(suppressed=False,
plyName='ply_%02d' % (i+1),
region=region,
material=mat_name,
thicknessType=SPECIFY_THICKNESS,
thickness=plyt*scaling_factor,
orientationValue=angle,
orientationType=SPECIFY_ORIENT,
numIntPoints=numIntPoints)
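# Sketch of a typical call (every object below is assumed to exist already in
# the caller's Abaqus model; the names are illustrative only):
#
#     create_composite_layup('Laminate', stack=[0., 45., -45., 90.],
#                            plyts=[0.125]*4, mat_names=['CFRP']*4,
#                            region=region, part=part, part_csys=part_csys)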
def createDiscreteField(mod, odb, step_name, frame_num):
from abaqusConstants import (NODES, PRESCRIBEDCONDITION_DOF)
u=odb.steps[step_name].frames[frame_num].fieldOutputs['U']
ur=odb.steps[step_name].frames[frame_num].fieldOutputs['UR']
datas=[]
for u_value, ur_value in zip(u.values, ur.values):
id=u_value.nodeLabel
data=np.concatenate((u_value.data, ur_value.data))
datas.append([id, data])
datas.sort(key=lambda x: x[0])
list_ids=[]
list_dof_values=[]
for data in datas:
list_ids += [data[0] for i in xrange(6)]
for dof in xrange(1,7):
list_dof_values += [float(dof), data[1][dof-1]]
tuple_ids=tuple(list_ids)
tuple_dof_values=tuple(list_dof_values)
mod.DiscreteField(name='discreteField',
description='',
location=NODES,
fieldType=PRESCRIBEDCONDITION_DOF,
dataWidth=2,
defaultValues=(0.0, 0.0, 0.0, 0.0, 0.0, 0.0),
data=(('', 2, tuple_ids, tuple_dof_values),))
def create_sketch_plane(cc, entity):
"""Creates a sketch plane tangent to the shell surface
Parameters
----------
cc : :class:`.ConeCyl` object
entity : object
Any object with the attribute: ``thetadeg``, usually a
:class:`.Imperfection`.
Returns
-------
plane : :class:`.Plane` object
"""
from abaqus import mdb
part = mdb.models[cc.model_name].parts[cc.part_name_shell]
for plane in cc.sketch_planes:
if abs(plane.thetadeg - entity.thetadeg) < TOL:
return plane
x1, y1, z1 = utils.cyl2rec(1.05*cc.r, entity.thetadeg, 0.)
v1 = np.array([x1, y1, z1], dtype=FLOAT)
x2, y2, z2 = utils.cyl2rec(1.05*cc.r2, entity.thetadeg, cc.h)
v2 = np.array([x2, y2, z2], dtype=FLOAT)
v3 = np.cross(v2, v1)
if abs(v3.max()) > abs(v3.min()):
v3 = v3/v3.max() * cc.h/2.
else:
v3 = v3/abs(v3.min()) * cc.h/2.
x3, y3, z3 = v2 + v3
pt = part.DatumPointByCoordinate(coords=(x1, y1, z1))
p1 = part.datums[pt.id]
pt = part.DatumPointByCoordinate(coords=(x2, y2, z2))
p2 = part.datums[pt.id]
pt = part.DatumPointByCoordinate(coords=(x3, y3, z3))
p3 = part.datums[pt.id]
plane = geom.Plane()
plane.p1 = p1
plane.p2 = p2
plane.p3 = p3
plane.part = part
plane.create()
plane.thetadeg = entity.thetadeg
cc.sketch_planes.append(plane)
return plane
def set_colors_ti(cc):
from abaqus import mdb, session
from abaqusConstants import ON
part = mdb.models[cc.model_name].parts[cc.part_name_shell]
viewport = session.viewports[session.currentViewportName]
cmap = viewport.colorMappings['Set']
viewport.setColor(colorMapping=cmap)
viewport.enableMultipleColors()
viewport.setColor(initialColor='#BDBDBD')
keys = part.sets.keys()
names = [k for k in keys if k.find('Set_measured_imp_t') > -1]
overrides = dict([[names[i],(True,COLORS[i],'Default',COLORS[i])]
for i in range(len(names))])
dummylen = len(keys)-len(overrides)
new_COLORS = tuple([COLORS[-1]]*dummylen + list(COLORS))
session.autoColors.setValues(colors=new_COLORS)
cmap.updateOverrides(overrides=overrides)
viewport.partDisplay.setValues(mesh=ON)
viewport.partDisplay.geometryOptions.setValues(referenceRepresentation=ON)
viewport.disableMultipleColors()
def printLBmodes():
from abaqus import session
from abaqusConstants import DPI_1200, EXTRA_FINE, OFF, PNG
vp = session.viewports[session.currentViewportName]
session.psOptions.setValues(logo=OFF,
resolution=DPI_1200,
shadingQuality=EXTRA_FINE)
session.printOptions.setValues(reduceColors=False)
for i in xrange(1,51):
vp.odbDisplay.setFrame(step=0, frame=i)
session.printToFile(fileName='mode %02d.png'%i,
format=PNG,
canvasObjects=(vp,))
def get_current_odbdisplay():
from abaqus import session
viewport = session.viewports[session.currentViewportName]
try:
name = viewport.odbDisplay.name
except:
return None
return viewport.odbDisplay
def get_current_odb():
from abaqus import session
viewport = session.viewports[session.currentViewportName]
odbdisplay = get_current_odbdisplay()
if odbdisplay:
return session.odbs[odbdisplay.name]
else:
return None
def get_current_step_name():
odbdisplay = get_current_odbdisplay()
if odbdisplay:
index, frame_num = odbdisplay.fieldFrame
return odbdisplay.fieldSteps[index][0]
else:
return None
def get_current_frame():
odbdisplay = get_current_odbdisplay()
if not odbdisplay:
return None
step_name = get_current_step_name()
step_num, frame_num = odbdisplay.fieldFrame
odb = get_current_odb()
step = odb.steps[step_name]
return step.frames[frame_num]
| [
"saullogiovani@gmail.com"
] | saullogiovani@gmail.com |
1c415503eb40a3d71e84667fef1513d4ecb304cb | 1ddd9929238af090fd15acb08c3a9a034039bf7c | /resolucao_python/unidade_1/script_3.py | e3a0349b9f4fa9f316310e6c25eff4d29c562754 | [] | no_license | frclasso/estatistica_geral_puc_minas_2020 | 7e2e12958411416c8f912e7d349b908d18dbe2a9 | 9d98d1ec8e645e882885833709b610065b2d4fc6 | refs/heads/main | 2023-02-23T23:06:44.785060 | 2021-01-31T21:36:25 | 2021-01-31T21:36:25 | 325,156,858 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,677 | py | from typing import List
import statistics
import math
class CaculaQuartoQuartil:
"""
    PROBLEM 2
    A researcher is interested in evaluating the times (in seconds) that
    consumers take between starting and finishing a purchase on a given
    website. To do so, 12 consumers chosen at random in the system were
    observed. The data are below:
    71, 73, 73, 74, 74, 75
    76, 77, 77, 79, 81, 83
"""
def __init__(self, dados: List) -> None:
self.dados = dados
def calcula_percentil(self) -> int:
"""Quarto quartil corresponde ao quarto decil, 40/100
Valor decimal deve ser arredondado para cima."""
percentil = round((40/100) * len(self.dados), 1)
if isinstance(percentil, float):
return math.ceil(percentil) # arredondando pra cima (4.8)
else:
return percentil
def calcula_posicao(self, value):
"""Retorna posição a ser avaliada, caso o resultado seja um número inteiro
fazer a média entre o valor obtido e o valor da próxima posição."""
posicao = self.dados[value - 1] # Python inicia a contagem por 0 (zero)
return posicao
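# Worked example with the data below: 0.40 * 12 = 4.8, which rounds up to
# position 5, and the 5th ordered value is 74 seconds.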
if __name__ == "__main__":
tempo_compra = [71, 73, 73, 74, 74, 75, 76, 77, 77, 79, 81, 83]
c = CaculaQuartoQuartil(tempo_compra)
perc = c.calcula_percentil()
print(f"Percentil [Posição] {perc}")
print(f"Valor correspondente a Posição: {c.calcula_posicao(perc)}")
print(f"Atualmente 40% dos demoram até {c.calcula_posicao(perc)} segundos"
f" entre o início e a finalização da compra")
| [
"frcalsso@yahoo.com.br"
] | frcalsso@yahoo.com.br |
c326d3f5a52588f010147b3d3cb5aa5fd79be81e | 0a33cc0ebb67c51cc38750f0f04c3e6c088e3b1a | /tests/components/rfxtrx/test_device_trigger.py | 8e5ee27504ba419c811be9e585c59811da12a6e7 | [
"Apache-2.0"
] | permissive | robert-alfaro/home-assistant | e9bb08ad22a167ed226fb3de8f5b36acfc393548 | 4a53121b58b77a318f08c64ad2c5372a16b800e0 | refs/heads/dev | 2023-02-28T06:46:23.217246 | 2022-04-26T17:30:08 | 2022-04-26T17:30:08 | 115,894,662 | 4 | 0 | Apache-2.0 | 2023-02-22T06:21:08 | 2018-01-01T02:00:35 | Python | UTF-8 | Python | false | false | 5,694 | py | """The tests for RFXCOM RFXtrx device triggers."""
from __future__ import annotations
from typing import Any, NamedTuple
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.device_automation import DeviceAutomationType
from homeassistant.components.rfxtrx import DOMAIN
from homeassistant.helpers.device_registry import DeviceRegistry
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
assert_lists_same,
async_get_device_automations,
async_mock_service,
mock_device_registry,
)
from tests.components.rfxtrx.conftest import create_rfx_test_cfg
class EventTestData(NamedTuple):
"""Test data linked to a device."""
code: str
device_identifiers: set[tuple[str, str, str, str]]
type: str
subtype: str
DEVICE_LIGHTING_1 = {("rfxtrx", "10", "0", "E5")}
EVENT_LIGHTING_1 = EventTestData("0710002a45050170", DEVICE_LIGHTING_1, "command", "On")
DEVICE_ROLLERTROL_1 = {("rfxtrx", "19", "0", "009ba8:1")}
EVENT_ROLLERTROL_1 = EventTestData(
"09190000009ba8010100", DEVICE_ROLLERTROL_1, "command", "Down"
)
DEVICE_FIREALARM_1 = {("rfxtrx", "20", "3", "a10900:32")}
EVENT_FIREALARM_1 = EventTestData(
"08200300a109000670", DEVICE_FIREALARM_1, "status", "Panic"
)
@pytest.fixture(name="device_reg")
def device_reg_fixture(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
async def setup_entry(hass, devices):
"""Construct a config setup."""
entry_data = create_rfx_test_cfg(devices=devices)
mock_entry = MockConfigEntry(domain="rfxtrx", unique_id=DOMAIN, data=entry_data)
mock_entry.add_to_hass(hass)
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
await hass.async_start()
@pytest.mark.parametrize(
"event,expected",
[
[
EVENT_LIGHTING_1,
[
{"type": "command", "subtype": subtype}
for subtype in [
"Off",
"On",
"Dim",
"Bright",
"All/group Off",
"All/group On",
"Chime",
"Illegal command",
]
],
]
],
)
async def test_get_triggers(hass, device_reg, event: EventTestData, expected):
"""Test we get the expected triggers from a rfxtrx."""
await setup_entry(hass, {event.code: {}})
device_entry = device_reg.async_get_device(event.device_identifiers, set())
expected_triggers = [
{
"domain": DOMAIN,
"device_id": device_entry.id,
"platform": "device",
"metadata": {},
**expect,
}
for expect in expected
]
triggers = await async_get_device_automations(
hass, DeviceAutomationType.TRIGGER, device_entry.id
)
triggers = [value for value in triggers if value["domain"] == "rfxtrx"]
assert_lists_same(triggers, expected_triggers)
@pytest.mark.parametrize(
"event",
[
EVENT_LIGHTING_1,
EVENT_ROLLERTROL_1,
EVENT_FIREALARM_1,
],
)
async def test_firing_event(hass, device_reg: DeviceRegistry, rfxtrx, event):
"""Test for turn_on and turn_off triggers firing."""
await setup_entry(hass, {event.code: {"fire_event": True}})
device_entry = device_reg.async_get_device(event.device_identifiers, set())
assert device_entry
calls = async_mock_service(hass, "test", "automation")
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": device_entry.id,
"type": event.type,
"subtype": event.subtype,
},
"action": {
"service": "test.automation",
"data_template": {"some": ("{{trigger.platform}}")},
},
},
]
},
)
await hass.async_block_till_done()
await rfxtrx.signal(event.code)
assert len(calls) == 1
assert calls[0].data["some"] == "device"
async def test_invalid_trigger(hass, device_reg: DeviceRegistry):
"""Test for invalid actions."""
event = EVENT_LIGHTING_1
await setup_entry(hass, {event.code: {"fire_event": True}})
device_identifers: Any = event.device_identifiers
device_entry = device_reg.async_get_device(device_identifers, set())
assert device_entry
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": device_entry.id,
"type": event.type,
"subtype": "invalid",
},
"action": {
"service": "test.automation",
"data_template": {"some": ("{{trigger.platform}}")},
},
},
]
},
)
await hass.async_block_till_done()
assert len(notifications := hass.states.async_all("persistent_notification")) == 1
assert (
"The following integrations and platforms could not be set up"
in notifications[0].attributes["message"]
)
| [
"noreply@github.com"
] | robert-alfaro.noreply@github.com |
4cb41dfca9cf17c318af5b90764e56ae92b007c4 | 7bf449f96e1c72cf097dd5264c5efa4337fe1ad7 | /final_proj/VisualAnalyses.py | c76803b3fed0340d336c668b1fd9b0ec7d009f45 | [] | no_license | rgc292/final_project | 9229573a258554655c464306a8d8be4ddade5aa2 | c0ec7690bd1520020e35864eeaec501fd45f3d32 | refs/heads/master | 2021-01-18T02:32:39.062081 | 2015-12-17T03:10:50 | 2015-12-17T03:10:50 | 48,068,674 | 0 | 1 | null | 2015-12-15T20:59:08 | 2015-12-15T20:59:08 | null | UTF-8 | Python | false | false | 1,057 | py | '''
Created on Dec 12, 2015
@author: Kristen kk3175 & Rafael rgc292
'''
import line_graph as lg
import bar_graph as bg
from Pie_Chart import Pie_Chart
'''
Module to perform visual analyses of a specific NYC housing complaint.
Takes a complaint dataset as an argument.
Makes pie charts, line graphs, and bar graphs so the user can understand the housing
complaint data from different viewpoints.
'''
def make_visual_analyses(complaint_dataset):
print 'Making visual analysis tools...'
try:
pie_chart = Pie_Chart(complaint_dataset)
pie_chart.plot_by_violation_type()
line_graph = lg.LineGraph()
line_graph.plot_line_graph(complaint_dataset)
bar_graph = bg.BarGraph()
bar_graph.plot_bar_graph(complaint_dataset)
print '\nFigures are now saved in the figures folder.'
except (ValueError, TypeError):
print "\nYour range of dates does not contain information for your choice of ID."
print "Please, choose a different combination."
| [
"Rafa@192.168.1.139"
] | Rafa@192.168.1.139 |
2944d77e65016a65342224fb147b9da82d155353 | 8cc30a27835e205a3476783106ca1605a6a85c48 | /amy/trainings/views.py | 60e04fb4bf1ebf7c0f1ee24522888df26eb7dc5e | [
"MIT"
] | permissive | gaybro8777/amy | d968edc78bbd3f63f3353450334721628dbbc0f4 | 3cf99aed58a0f0acf83d2645a30d8408208ccea9 | refs/heads/develop | 2023-03-07T22:08:28.692700 | 2021-02-23T18:06:06 | 2021-02-23T18:06:06 | 341,930,505 | 0 | 0 | MIT | 2021-02-24T17:22:08 | 2021-02-24T14:40:43 | null | UTF-8 | Python | false | false | 7,189 | py | from django.contrib import messages
from django.db.models import (
Case,
When,
IntegerField,
Count,
F,
Sum,
Prefetch,
)
from django.shortcuts import render, redirect
from django.urls import reverse_lazy
from trainings.filters import (
TraineeFilter,
)
from trainings.forms import (
TrainingProgressForm,
BulkAddTrainingProgressForm,
BulkDiscardProgressesForm,
)
from workshops.base_views import (
AMYCreateView,
AMYUpdateView,
AMYDeleteView,
AMYListView,
RedirectSupportMixin,
PrepopulationSupportMixin,
)
from workshops.models import (
Badge,
Event,
Person,
Task,
TrainingProgress,
TrainingRequirement,
)
from workshops.util import (
get_pagination_items,
admin_required,
OnlyForAdminsMixin,
)
class AllTrainings(OnlyForAdminsMixin, AMYListView):
context_object_name = 'all_trainings'
template_name = 'trainings/all_trainings.html'
queryset = Event.objects.filter(tags__name='TTT').annotate(
trainees=Count(Case(When(task__role__name='learner',
then=F('task__person__id')),
output_field=IntegerField()),
distinct=True),
finished=Count(Case(When(task__role__name='learner',
task__person__badges__in=Badge.objects.instructor_badges(),
then=F('task__person__id')),
output_field=IntegerField()),
distinct=True),
).exclude(trainees=0).order_by('-start')
title = 'All Instructor Trainings'
# ------------------------------------------------------------
# Instructor Training related views
class TrainingProgressCreate(RedirectSupportMixin,
PrepopulationSupportMixin,
OnlyForAdminsMixin,
AMYCreateView):
model = TrainingProgress
form_class = TrainingProgressForm
populate_fields = ['trainee']
def get_initial(self):
initial = super().get_initial()
initial['evaluated_by'] = self.request.user
return initial
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['form'].helper = context['form'].create_helper
return context
class TrainingProgressUpdate(RedirectSupportMixin, OnlyForAdminsMixin,
AMYUpdateView):
model = TrainingProgress
form_class = TrainingProgressForm
template_name = 'trainings/trainingprogress_form.html'
class TrainingProgressDelete(RedirectSupportMixin, OnlyForAdminsMixin,
AMYDeleteView):
model = TrainingProgress
success_url = reverse_lazy('all_trainees')
def all_trainees_queryset():
def has_badge(badge):
return Sum(Case(When(badges__name=badge, then=1),
default=0,
output_field=IntegerField()))
return (
Person.objects
.annotate_with_instructor_eligibility()
.prefetch_related(
Prefetch(
'task_set',
to_attr='training_tasks',
queryset=Task.objects.filter(role__name='learner',
event__tags__name='TTT')
),
'training_tasks__event',
'trainingrequest_set',
'trainingprogress_set',
'trainingprogress_set__requirement',
'trainingprogress_set__evaluated_by',
).annotate(
is_swc_instructor=has_badge('swc-instructor'),
is_dc_instructor=has_badge('dc-instructor'),
is_lc_instructor=has_badge('lc-instructor'),
is_instructor=Sum(
Case(
When(
badges__name__in=Badge.INSTRUCTOR_BADGES,
then=1
),
default=0,
output_field=IntegerField()
)
),
).order_by('family', 'personal')
)
@admin_required
def all_trainees(request):
filter = TraineeFilter(
request.GET,
queryset=all_trainees_queryset(),
)
trainees = get_pagination_items(request, filter.qs)
if request.method == 'POST' and 'discard' in request.POST:
# Bulk discard progress of selected trainees
form = BulkAddTrainingProgressForm()
discard_form = BulkDiscardProgressesForm(request.POST)
if discard_form.is_valid():
for trainee in discard_form.cleaned_data['trainees']:
TrainingProgress.objects.filter(trainee=trainee)\
.update(discarded=True)
messages.success(request, 'Successfully discarded progress of '
'all selected trainees.')
# Raw uri contains GET parameters from django filters. We use it
# to preserve filter settings.
return redirect(request.get_raw_uri())
elif request.method == 'POST' and 'submit' in request.POST:
# Bulk add progress to selected trainees
instance = TrainingProgress(evaluated_by=request.user)
form = BulkAddTrainingProgressForm(request.POST, instance=instance)
discard_form = BulkDiscardProgressesForm()
if form.is_valid():
for trainee in form.cleaned_data['trainees']:
TrainingProgress.objects.create(
trainee=trainee,
evaluated_by=request.user,
requirement=form.cleaned_data['requirement'],
state=form.cleaned_data['state'],
discarded=False,
event=form.cleaned_data['event'],
url=form.cleaned_data['url'],
notes=form.cleaned_data['notes'],
)
messages.success(request, 'Successfully changed progress of '
'all selected trainees.')
return redirect(request.get_raw_uri())
else: # GET request
# If the user filters by training, we want to set initial values for
# "requirement" and "training" fields.
training_id = request.GET.get('training', None) or None
try:
initial = {
'event': Event.objects.get(pk=training_id),
'requirement': TrainingRequirement.objects.get(name='Training')
}
except Event.DoesNotExist: # or there is no `training` GET parameter
initial = None
form = BulkAddTrainingProgressForm(initial=initial)
discard_form = BulkDiscardProgressesForm()
context = {'title': 'Trainees',
'all_trainees': trainees,
'swc': Badge.objects.get(name='swc-instructor'),
'dc': Badge.objects.get(name='dc-instructor'),
'lc': Badge.objects.get(name='lc-instructor'),
'filter': filter,
'form': form,
'discard_form': discard_form}
return render(request, 'trainings/all_trainees.html', context)
| [
"piotr@banaszkiewicz.org"
] | piotr@banaszkiewicz.org |
724a23a13c33701d678c2ee7d967eef75ab6289a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03354/s490945583.py | f03b65974dfe8d09656e62cab53ea47171b1c89a | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | import sys
input = sys.stdin.readline
n, m = [ int(v) for v in input().split() ]
num = [ int(v)-1 for v in input().split() ]
parent_list = [ i for i in range(n) ]
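# Disjoint-set (union-find) over the n vertices; root() applies path
# compression by re-pointing each visited node at its grandparent.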
def root(x):
while parent_list[x] != x:
parent_list[x] = parent_list[parent_list[x]]
x = parent_list[x]
return x
for i in range(m):
a, b = [ int(v)-1 for v in input().split() ]
ra, rb = root(a), root(b)
if ra != rb:
parent_list[ra] = rb
ans_list = []
for i, v in enumerate(num):
if root(i) == root(v):
ans_list.append(True)
else:
ans_list.append(False)
print(ans_list.count(True))
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
46d0eabafb34b253ecf80a7b0f1390f6b39739a0 | 9545652800884f0e54fe6595d8634c29ea4827a2 | /中级算法/leetCode_80_常数时间插入,删除和获取随机元素.py | 52f7adb9f1fa3ec80b550e936ff7cfa8d7b23901 | [] | no_license | challeger/leetCode | 662d9f600a40fd8970568679656f6911a6fdfb05 | d75c35b6f8ab33c158de7fa977ab0b16dac4fc25 | refs/heads/master | 2023-01-13T07:34:42.464959 | 2020-11-13T02:40:31 | 2020-11-13T02:40:31 | 286,426,790 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,819 | py | """
day: 2020-08-22
url: https://leetcode-cn.com/leetbook/read/top-interview-questions-medium/xw5rt1/
Problem: Insert, delete and get a random element in O(1) time
Description: design a data structure that supports the following operations
in average O(1) time:
    1. insert(val): insert val into the set if it is not already present
    2. remove(val): remove val from the set if it is present
    3. getRandom: return a random element from the current set; every
       element must be returned with equal probability
Example:
    // Initialize an empty set.
    RandomizedSet randomSet = new RandomizedSet();
    // Insert 1. Returns true: 1 was inserted successfully.
    randomSet.insert(1);
    // Returns false: 2 is not in the set.
    randomSet.remove(2);
    // Insert 2. Returns true. The set now contains [1,2].
    randomSet.insert(2);
    // getRandom should return 1 or 2 at random.
    randomSet.getRandom();
    // Remove 1 from the set. Returns true. The set now contains [2].
    randomSet.remove(1);
    // 2 is already in the set, so this returns false.
    randomSet.insert(2);
    // Since 2 is the only number in the set, getRandom always returns 2.
    randomSet.getRandom();
Approach:
    insert just uses list.append.
    remove swaps the element to delete with the tail element and then pops
    the tail.
    getRandom uses random.choice.
    So we need a list to hold the values plus a dict mapping each value to
    its index.
    On insert: self._hashmap[value] = len(self.data); self.data.append(value)
    On remove: look up the index of the value to delete and the last value,
    move the last value into that slot, update its index in the dict,
    then pop() the tail and del self._hashmap[value].
"""
class RandomizedSet:
def __init__(self):
"""
Initialize your data structure here.
"""
self.data = []
self._hashmap = {}
def insert(self, val: int) -> bool:
"""
Inserts a value to the set. Returns true if the set did not already contain the specified element.
"""
if val not in self._hashmap:
self._hashmap[val] = len(self.data)
self.data.append(val)
return True
return False
def remove(self, val: int) -> bool:
"""
Removes a value from the set. Returns true if the set contained the specified element.
"""
if val in self._hashmap:
idx, last_element = self._hashmap[val], self.data[-1]
self.data[-1], self._hashmap[last_element] = self.data[idx], idx
self.data.pop()
del self._hashmap[val]
return True
return False
def getRandom(self) -> int:
"""
Get a random element from the set.
"""
from random import choice
return choice(self.data)
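# Minimal demo mirroring the example from the docstring above:
if __name__ == "__main__":
    rs = RandomizedSet()
    assert rs.insert(1) is True    # 1 inserted
    assert rs.remove(2) is False   # 2 was never present
    assert rs.insert(2) is True    # set is now {1, 2}
    assert rs.getRandom() in (1, 2)
    assert rs.remove(1) is True    # set is now {2}
    assert rs.insert(2) is False   # 2 already present
    assert rs.getRandom() == 2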
| [
"799613500@qq.com"
] | 799613500@qq.com |
7a00a36399c4b0d97304920820945c716f541d46 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/sympy_sympy/sympy-master/sympy/utilities/autowrap.py | 950e1ffff89b6886a3953cf192950cf2ee9606a9 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 34,998 | py | """Module for compiling codegen output, and wrap the binary for use in
python.
.. note:: To use the autowrap module it must first be imported
>>> from sympy.utilities.autowrap import autowrap
This module provides a common interface for different external backends, such
as f2py, fwrap, Cython, SWIG(?) etc. (Currently only f2py and Cython are
implemented) The goal is to provide access to compiled binaries of acceptable
performance with a one-button user interface, i.e.
>>> from sympy.abc import x,y
>>> expr = ((x - y)**(25)).expand()
>>> binary_callable = autowrap(expr)
>>> binary_callable(1, 2)
-1.0
The callable returned from autowrap() is a binary python function, not a
SymPy object. If it is desired to use the compiled function in symbolic
expressions, it is better to use binary_function() which returns a SymPy
Function object. The binary callable is attached as the _imp_ attribute and
invoked when a numerical evaluation is requested with evalf(), or with
lambdify().
>>> from sympy.utilities.autowrap import binary_function
>>> f = binary_function('f', expr)
>>> 2*f(x, y) + y
y + 2*f(x, y)
>>> (2*f(x, y) + y).evalf(2, subs={x: 1, y:2})
0.e-110
The idea is that a SymPy user will primarily be interested in working with
mathematical expressions, and should not have to learn details about wrapping
tools in order to evaluate expressions numerically, even if they are
computationally expensive.
When is this useful?
1) For computations on large arrays, Python iterations may be too slow,
and depending on the mathematical expression, it may be difficult to
exploit the advanced index operations provided by NumPy.
2) For *really* long expressions that will be called repeatedly, the
compiled binary should be significantly faster than SymPy's .evalf()
3) If you are generating code with the codegen utility in order to use
it in another project, the automatic python wrappers let you test the
binaries immediately from within SymPy.
4) To create customized ufuncs for use with numpy arrays.
See *ufuncify*.
When is this module NOT the best approach?
1) If you are really concerned about speed or memory optimizations,
you will probably get better results by working directly with the
wrapper tools and the low level code. However, the files generated
by this utility may provide a useful starting point and reference
code. Temporary files will be left intact if you supply the keyword
tempdir="path/to/files/".
2) If the array computation can be handled easily by numpy, and you
don't need the binaries for another project.
"""
from __future__ import print_function, division
_doctest_depends_on = {'exe': ('f2py', 'gfortran', 'gcc'), 'modules': ('numpy',)}
import sys
import os
import shutil
import tempfile
from subprocess import STDOUT, CalledProcessError, check_output
from string import Template
from sympy.core.cache import cacheit
from sympy.core.compatibility import range, iterable
from sympy.core.function import Lambda
from sympy.core.relational import Eq
from sympy.core.symbol import Dummy, Symbol
from sympy.tensor.indexed import Idx, IndexedBase
from sympy.utilities.codegen import (make_routine, get_code_generator,
OutputArgument, InOutArgument, InputArgument,
CodeGenArgumentListError, Result, ResultBase, CCodeGen)
from sympy.utilities.lambdify import implemented_function
from sympy.utilities.decorator import doctest_depends_on
class CodeWrapError(Exception):
pass
class CodeWrapper(object):
"""Base Class for code wrappers"""
_filename = "wrapped_code"
_module_basename = "wrapper_module"
_module_counter = 0
@property
def filename(self):
return "%s_%s" % (self._filename, CodeWrapper._module_counter)
@property
def module_name(self):
return "%s_%s" % (self._module_basename, CodeWrapper._module_counter)
def __init__(self, generator, filepath=None, flags=[], verbose=False):
"""
generator -- the code generator to use
"""
self.generator = generator
self.filepath = filepath
self.flags = flags
self.quiet = not verbose
@property
def include_header(self):
return bool(self.filepath)
@property
def include_empty(self):
return bool(self.filepath)
def _generate_code(self, main_routine, routines):
routines.append(main_routine)
self.generator.write(
routines, self.filename, True, self.include_header,
self.include_empty)
def wrap_code(self, routine, helpers=[]):
workdir = self.filepath or tempfile.mkdtemp("_sympy_compile")
if not os.access(workdir, os.F_OK):
os.mkdir(workdir)
oldwork = os.getcwd()
os.chdir(workdir)
try:
sys.path.append(workdir)
self._generate_code(routine, helpers)
self._prepare_files(routine)
self._process_files(routine)
mod = __import__(self.module_name)
finally:
sys.path.remove(workdir)
CodeWrapper._module_counter += 1
os.chdir(oldwork)
if not self.filepath:
try:
shutil.rmtree(workdir)
except OSError:
# Could be some issues on Windows
pass
return self._get_wrapped_function(mod, routine.name)
def _process_files(self, routine):
command = self.command
command.extend(self.flags)
try:
retoutput = check_output(command, stderr=STDOUT)
except CalledProcessError as e:
raise CodeWrapError(
"Error while executing command: %s. Command output is:\n%s" % (
" ".join(command), e.output.decode()))
if not self.quiet:
print(retoutput)
class DummyWrapper(CodeWrapper):
"""Class used for testing independent of backends """
template = """# dummy module for testing of SymPy
def %(name)s():
return "%(expr)s"
%(name)s.args = "%(args)s"
%(name)s.returns = "%(retvals)s"
"""
def _prepare_files(self, routine):
return
def _generate_code(self, routine, helpers):
with open('%s.py' % self.module_name, 'w') as f:
printed = ", ".join(
[str(res.expr) for res in routine.result_variables])
# convert OutputArguments to return value like f2py
args = filter(lambda x: not isinstance(
x, OutputArgument), routine.arguments)
retvals = []
for val in routine.result_variables:
if isinstance(val, Result):
retvals.append('nameless')
else:
retvals.append(val.result_var)
print(DummyWrapper.template % {
'name': routine.name,
'expr': printed,
'args': ", ".join([str(a.name) for a in args]),
'retvals': ", ".join([str(val) for val in retvals])
}, end="", file=f)
def _process_files(self, routine):
return
@classmethod
def _get_wrapped_function(cls, mod, name):
return getattr(mod, name)
class CythonCodeWrapper(CodeWrapper):
"""Wrapper that uses Cython"""
setup_template = (
"try:\n"
" from setuptools import setup\n"
" from setuptools import Extension\n"
"except ImportError:\n"
" from distutils.core import setup\n"
" from distutils.extension import Extension\n"
"from Cython.Distutils import build_ext\n"
"{np_import}"
"\n"
"setup(\n"
" cmdclass = {{'build_ext': build_ext}},\n"
" ext_modules = [Extension({ext_args},\n"
" extra_compile_args=['-std=c99'])],\n"
"{np_includes}"
" )")
pyx_imports = (
"import numpy as np\n"
"cimport numpy as np\n\n")
pyx_header = (
"cdef extern from '{header_file}.h':\n"
" {prototype}\n\n")
pyx_func = (
"def {name}_c({arg_string}):\n"
"\n"
"{declarations}"
"{body}")
def __init__(self, *args, **kwargs):
super(CythonCodeWrapper, self).__init__(*args, **kwargs)
self._need_numpy = False
@property
def command(self):
command = [sys.executable, "setup.py", "build_ext", "--inplace"]
return command
def _prepare_files(self, routine):
pyxfilename = self.module_name + '.pyx'
codefilename = "%s.%s" % (self.filename, self.generator.code_extension)
# pyx
with open(pyxfilename, 'w') as f:
self.dump_pyx([routine], f, self.filename)
# setup.py
ext_args = [repr(self.module_name), repr([pyxfilename, codefilename])]
if self._need_numpy:
np_import = 'import numpy as np\n'
np_includes = ' include_dirs = [np.get_include()],\n'
else:
np_import = ''
np_includes = ''
with open('setup.py', 'w') as f:
f.write(self.setup_template.format(ext_args=", ".join(ext_args),
np_import=np_import,
np_includes=np_includes))
@classmethod
def _get_wrapped_function(cls, mod, name):
return getattr(mod, name + '_c')
def dump_pyx(self, routines, f, prefix):
"""Write a Cython file with python wrappers
This file contains all the definitions of the routines in c code and
refers to the header file.
Arguments
---------
routines
List of Routine instances
f
File-like object to write the file to
prefix
The filename prefix, used to refer to the proper header file.
Only the basename of the prefix is used.
"""
headers = []
functions = []
for routine in routines:
prototype = self.generator.get_prototype(routine)
# C Function Header Import
headers.append(self.pyx_header.format(header_file=prefix,
prototype=prototype))
# Partition the C function arguments into categories
py_rets, py_args, py_loc, py_inf = self._partition_args(routine.arguments)
# Function prototype
name = routine.name
arg_string = ", ".join(self._prototype_arg(arg) for arg in py_args)
# Local Declarations
local_decs = []
for arg, val in py_inf.items():
proto = self._prototype_arg(arg)
mat, ind = val
local_decs.append(" cdef {0} = {1}.shape[{2}]".format(proto, mat, ind))
local_decs.extend([" cdef {0}".format(self._declare_arg(a)) for a in py_loc])
declarations = "\n".join(local_decs)
if declarations:
declarations = declarations + "\n"
# Function Body
args_c = ", ".join([self._call_arg(a) for a in routine.arguments])
rets = ", ".join([str(r.name) for r in py_rets])
if routine.results:
body = ' return %s(%s)' % (routine.name, args_c)
if rets:
body = body + ', ' + rets
else:
body = ' %s(%s)\n' % (routine.name, args_c)
body = body + ' return ' + rets
functions.append(self.pyx_func.format(name=name, arg_string=arg_string,
declarations=declarations, body=body))
# Write text to file
if self._need_numpy:
# Only import numpy if required
f.write(self.pyx_imports)
f.write('\n'.join(headers))
f.write('\n'.join(functions))
def _partition_args(self, args):
"""Group function arguments into categories."""
py_args = []
py_returns = []
py_locals = []
py_inferred = {}
for arg in args:
if isinstance(arg, OutputArgument):
py_returns.append(arg)
py_locals.append(arg)
elif isinstance(arg, InOutArgument):
py_returns.append(arg)
py_args.append(arg)
else:
py_args.append(arg)
# Find arguments that are array dimensions. These can be inferred
# locally in the Cython code.
if isinstance(arg, (InputArgument, InOutArgument)) and arg.dimensions:
dims = [d[1] + 1 for d in arg.dimensions]
sym_dims = [(i, d) for (i, d) in enumerate(dims) if isinstance(d, Symbol)]
for (i, d) in sym_dims:
py_inferred[d] = (arg.name, i)
for arg in args:
if arg.name in py_inferred:
py_inferred[arg] = py_inferred.pop(arg.name)
# Filter inferred arguments from py_args
py_args = [a for a in py_args if a not in py_inferred]
return py_returns, py_args, py_locals, py_inferred
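    # Note on ``_partition_args`` above (an illustrative sketch): for a
    # routine ``f(x, m, y)`` where ``x`` is an input array of symbolic length
    # ``m`` and ``y`` is an OutputArgument, it returns roughly
    #     py_returns == [y], py_args == [x], py_locals == [y],
    #     py_inferred == {m: ('x', 0)}
    # i.e. ``m`` is dropped from the Python signature and read from
    # ``x.shape[0]`` in the generated wrapper.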
def _prototype_arg(self, arg):
mat_dec = "np.ndarray[{mtype}, ndim={ndim}] {name}"
np_types = {'double': 'np.double_t',
'int': 'np.int_t'}
t = arg.get_datatype('c')
if arg.dimensions:
self._need_numpy = True
ndim = len(arg.dimensions)
mtype = np_types[t]
return mat_dec.format(mtype=mtype, ndim=ndim, name=arg.name)
else:
return "%s %s" % (t, str(arg.name))
def _declare_arg(self, arg):
proto = self._prototype_arg(arg)
if arg.dimensions:
shape = '(' + ','.join(str(i[1] + 1) for i in arg.dimensions) + ')'
return proto + " = np.empty({shape})".format(shape=shape)
else:
return proto + " = 0"
def _call_arg(self, arg):
if arg.dimensions:
t = arg.get_datatype('c')
return "<{0}*> {1}.data".format(t, arg.name)
elif isinstance(arg, ResultBase):
return "&{0}".format(arg.name)
else:
return str(arg.name)
class F2PyCodeWrapper(CodeWrapper):
"""Wrapper that uses f2py"""
@property
def command(self):
filename = self.filename + '.' + self.generator.code_extension
args = ['-c', '-m', self.module_name, filename]
command = [sys.executable, "-c", "import numpy.f2py as f2py2e;f2py2e.main()"]+args
return command
def _prepare_files(self, routine):
pass
@classmethod
def _get_wrapped_function(cls, mod, name):
return getattr(mod, name)
def _get_code_wrapper_class(backend):
wrappers = {'F2PY': F2PyCodeWrapper, 'CYTHON': CythonCodeWrapper,
'DUMMY': DummyWrapper}
return wrappers[backend.upper()]
# Here we define a lookup of backends -> tuples of languages. For now, each
# tuple is of length 1, but if a backend supports more than one language,
# the most preferable language is listed first.
_lang_lookup = {'CYTHON': ('C',),
'F2PY': ('F95',),
'NUMPY': ('C',),
'DUMMY': ('F95',)} # Dummy here just for testing
def _infer_language(backend):
"""For a given backend, return the top choice of language"""
langs = _lang_lookup.get(backend.upper(), False)
if not langs:
raise ValueError("Unrecognized backend: " + backend)
return langs[0]
def _validate_backend_language(backend, language):
"""Throws error if backend and language are incompatible"""
langs = _lang_lookup.get(backend.upper(), False)
if not langs:
raise ValueError("Unrecognized backend: " + backend)
if language.upper() not in langs:
raise ValueError(("Backend {0} and language {1} are "
"incompatible").format(backend, language))
@cacheit
@doctest_depends_on(exe=('f2py', 'gfortran'), modules=('numpy',))
def autowrap(
expr, language=None, backend='f2py', tempdir=None, args=None, flags=None,
verbose=False, helpers=None):
"""Generates python callable binaries based on the math expression.
Parameters
----------
expr
The SymPy expression that should be wrapped as a binary routine.
language : string, optional
If supplied, (options: 'C' or 'F95'), specifies the language of the
generated code. If ``None`` [default], the language is inferred based
upon the specified backend.
backend : string, optional
Backend used to wrap the generated code. Either 'f2py' [default],
or 'cython'.
tempdir : string, optional
Path to directory for temporary files. If this argument is supplied,
the generated code and the wrapper input files are left intact in the
specified path.
args : iterable, optional
An ordered iterable of symbols. Specifies the argument sequence for the
function.
flags : iterable, optional
Additional option flags that will be passed to the backend.
verbose : bool, optional
If True, autowrap will not mute the command line backends. This can be
helpful for debugging.
helpers : iterable, optional
    Used to define auxiliary expressions needed for the main expr. If the
    main expression needs to call a specialized function it should be put
    in the ``helpers`` iterable. Autowrap will then make sure that the
    compiled main expression can link to the helper routine. Items should
    be tuples with (<function_name>, <sympy_expression>, <arguments>). It
    is mandatory to supply an argument sequence to helper routines.
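    For example (a sketch), ``helpers=('g', x**2, [x])`` compiles a helper
    routine ``g`` for the subexpression ``x**2``; if the main expression
    contains ``x**2``, it is substituted with a call to ``g(x)`` before code
    generation.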
>>> from sympy.abc import x, y, z
>>> from sympy.utilities.autowrap import autowrap
>>> expr = ((x - y + z)**(13)).expand()
>>> binary_func = autowrap(expr)
>>> binary_func(1, 4, 2)
-1.0
"""
if language:
_validate_backend_language(backend, language)
else:
language = _infer_language(backend)
helpers = [helpers] if helpers else ()
flags = flags if flags else ()
args = list(args) if iterable(args, exclude=set) else args
code_generator = get_code_generator(language, "autowrap")
CodeWrapperClass = _get_code_wrapper_class(backend)
code_wrapper = CodeWrapperClass(code_generator, tempdir, flags, verbose)
helps = []
for name_h, expr_h, args_h in helpers:
helps.append(make_routine(name_h, expr_h, args_h))
for name_h, expr_h, args_h in helpers:
if expr.has(expr_h):
name_h = binary_function(name_h, expr_h, backend = 'dummy')
expr = expr.subs(expr_h, name_h(*args_h))
try:
routine = make_routine('autofunc', expr, args)
except CodeGenArgumentListError as e:
# if all missing arguments are for pure output, we simply attach them
# at the end and try again, because the wrappers will silently convert
# them to return values anyway.
new_args = []
for missing in e.missing_args:
if not isinstance(missing, OutputArgument):
raise
new_args.append(missing.name)
routine = make_routine('autofunc', expr, args + new_args)
return code_wrapper.wrap_code(routine, helpers=helps)
@doctest_depends_on(exe=('f2py', 'gfortran'), modules=('numpy',))
def binary_function(symfunc, expr, **kwargs):
"""Returns a sympy function with expr as binary implementation
This is a convenience function that automates the steps needed to
autowrap the SymPy expression and attaching it to a Function object
with implemented_function().
>>> from sympy.abc import x, y
>>> from sympy.utilities.autowrap import binary_function
>>> expr = ((x - y)**(25)).expand()
>>> f = binary_function('f', expr)
>>> type(f)
<class 'sympy.core.function.UndefinedFunction'>
>>> 2*f(x, y)
2*f(x, y)
>>> f(x, y).evalf(2, subs={x: 1, y: 2})
-1.0
"""
binary = autowrap(expr, **kwargs)
return implemented_function(symfunc, binary)
#################################################################
# UFUNCIFY #
#################################################################
_ufunc_top = Template("""\
#include "Python.h"
#include "math.h"
#include "numpy/ndarraytypes.h"
#include "numpy/ufuncobject.h"
#include "numpy/halffloat.h"
#include ${include_file}
static PyMethodDef ${module}Methods[] = {
{NULL, NULL, 0, NULL}
};""")
_ufunc_outcalls = Template("*((double *)out${outnum}) = ${funcname}(${call_args});")
_ufunc_body = Template("""\
static void ${funcname}_ufunc(char **args, npy_intp *dimensions, npy_intp* steps, void* data)
{
npy_intp i;
npy_intp n = dimensions[0];
${declare_args}
${declare_steps}
for (i = 0; i < n; i++) {
${outcalls}
${step_increments}
}
}
PyUFuncGenericFunction ${funcname}_funcs[1] = {&${funcname}_ufunc};
static char ${funcname}_types[${n_types}] = ${types}
static void *${funcname}_data[1] = {NULL};""")
_ufunc_bottom = Template("""\
#if PY_VERSION_HEX >= 0x03000000
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"${module}",
NULL,
-1,
${module}Methods,
NULL,
NULL,
NULL,
NULL
};
PyMODINIT_FUNC PyInit_${module}(void)
{
PyObject *m, *d;
${function_creation}
m = PyModule_Create(&moduledef);
if (!m) {
return NULL;
}
import_array();
import_umath();
d = PyModule_GetDict(m);
${ufunc_init}
return m;
}
#else
PyMODINIT_FUNC init${module}(void)
{
PyObject *m, *d;
${function_creation}
m = Py_InitModule("${module}", ${module}Methods);
if (m == NULL) {
return;
}
import_array();
import_umath();
d = PyModule_GetDict(m);
${ufunc_init}
}
#endif\
""")
_ufunc_init_form = Template("""\
ufunc${ind} = PyUFunc_FromFuncAndData(${funcname}_funcs, ${funcname}_data, ${funcname}_types, 1, ${n_in}, ${n_out},
PyUFunc_None, "${module}", ${docstring}, 0);
PyDict_SetItemString(d, "${funcname}", ufunc${ind});
Py_DECREF(ufunc${ind});""")
_ufunc_setup = Template("""\
def configuration(parent_package='', top_path=None):
import numpy
from numpy.distutils.misc_util import Configuration
config = Configuration('',
parent_package,
top_path)
config.add_extension('${module}', sources=['${module}.c', '${filename}.c'])
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(configuration=configuration)""")
class UfuncifyCodeWrapper(CodeWrapper):
"""Wrapper for Ufuncify"""
@property
def command(self):
command = [sys.executable, "setup.py", "build_ext", "--inplace"]
return command
def wrap_code(self, routines, helpers=None):
# This routine overrides CodeWrapper because we can't assume funcname == routines[0].name
# Therefore we have to break the CodeWrapper private API.
# There isn't an obvious way to extend multi-expr support to
# the other autowrap backends, so we limit this change to ufuncify.
helpers = helpers if helpers is not None else []
# We just need a consistent name
funcname = 'wrapped_' + str(id(routines) + id(helpers))
workdir = self.filepath or tempfile.mkdtemp("_sympy_compile")
if not os.access(workdir, os.F_OK):
os.mkdir(workdir)
oldwork = os.getcwd()
os.chdir(workdir)
try:
sys.path.append(workdir)
self._generate_code(routines, helpers)
self._prepare_files(routines, funcname)
self._process_files(routines)
mod = __import__(self.module_name)
finally:
sys.path.remove(workdir)
CodeWrapper._module_counter += 1
os.chdir(oldwork)
if not self.filepath:
try:
shutil.rmtree(workdir)
except OSError:
# Could be some issues on Windows
pass
return self._get_wrapped_function(mod, funcname)
def _generate_code(self, main_routines, helper_routines):
all_routines = main_routines + helper_routines
self.generator.write(
all_routines, self.filename, True, self.include_header,
self.include_empty)
def _prepare_files(self, routines, funcname):
# C
codefilename = self.module_name + '.c'
with open(codefilename, 'w') as f:
self.dump_c(routines, f, self.filename, funcname=funcname)
# setup.py
with open('setup.py', 'w') as f:
self.dump_setup(f)
@classmethod
def _get_wrapped_function(cls, mod, name):
return getattr(mod, name)
def dump_setup(self, f):
setup = _ufunc_setup.substitute(module=self.module_name,
filename=self.filename)
f.write(setup)
def dump_c(self, routines, f, prefix, funcname=None):
"""Write a C file with python wrappers
This file contains all the definitions of the routines in c code.
Arguments
---------
routines
List of Routine instances
f
File-like object to write the file to
prefix
The filename prefix, used to name the imported module.
funcname
Name of the main function to be returned.
"""
if (funcname is None) and (len(routines) == 1):
funcname = routines[0].name
elif funcname is None:
raise ValueError('funcname must be specified for multiple output routines')
functions = []
function_creation = []
ufunc_init = []
module = self.module_name
include_file = "\"{0}.h\"".format(prefix)
top = _ufunc_top.substitute(include_file=include_file, module=module)
name = funcname
# Partition the C function arguments into categories
# Here we assume all routines accept the same arguments
r_index = 0
py_in, _ = self._partition_args(routines[0].arguments)
n_in = len(py_in)
n_out = len(routines)
# Declare Args
form = "char *{0}{1} = args[{2}];"
arg_decs = [form.format('in', i, i) for i in range(n_in)]
arg_decs.extend([form.format('out', i, i+n_in) for i in range(n_out)])
declare_args = '\n '.join(arg_decs)
# Declare Steps
form = "npy_intp {0}{1}_step = steps[{2}];"
step_decs = [form.format('in', i, i) for i in range(n_in)]
step_decs.extend([form.format('out', i, i+n_in) for i in range(n_out)])
declare_steps = '\n '.join(step_decs)
# Call Args
form = "*(double *)in{0}"
call_args = ', '.join([form.format(a) for a in range(n_in)])
# Step Increments
form = "{0}{1} += {0}{1}_step;"
step_incs = [form.format('in', i) for i in range(n_in)]
        step_incs.extend([form.format('out', i) for i in range(n_out)])
step_increments = '\n '.join(step_incs)
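        # For example, with n_in=2 and n_out=1 the snippets render as:
        #   declare_args:     char *in0 = args[0]; ... char *out0 = args[2];
        #   declare_steps:    npy_intp in0_step = steps[0]; ...
        #   call_args:        *(double *)in0, *(double *)in1
        #   step_increments:  in0 += in0_step; ... out0 += out0_step;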
# Types
n_types = n_in + n_out
types = "{" + ', '.join(["NPY_DOUBLE"]*n_types) + "};"
# Docstring
docstring = '"Created in SymPy with Ufuncify"'
# Function Creation
function_creation.append("PyObject *ufunc{0};".format(r_index))
# Ufunc initialization
init_form = _ufunc_init_form.substitute(module=module,
funcname=name,
docstring=docstring,
n_in=n_in, n_out=n_out,
ind=r_index)
ufunc_init.append(init_form)
outcalls = [_ufunc_outcalls.substitute(outnum=i, call_args=call_args,
funcname=routines[i].name) for i in range(n_out)]
body = _ufunc_body.substitute(module=module, funcname=name,
declare_args=declare_args,
declare_steps=declare_steps,
call_args=call_args,
step_increments=step_increments,
n_types=n_types, types=types, outcalls='\n '.join(outcalls))
functions.append(body)
body = '\n\n'.join(functions)
ufunc_init = '\n '.join(ufunc_init)
function_creation = '\n '.join(function_creation)
bottom = _ufunc_bottom.substitute(module=module,
ufunc_init=ufunc_init,
function_creation=function_creation)
text = [top, body, bottom]
f.write('\n\n'.join(text))
def _partition_args(self, args):
"""Group function arguments into categories."""
py_in = []
py_out = []
for arg in args:
if isinstance(arg, OutputArgument):
py_out.append(arg)
elif isinstance(arg, InOutArgument):
raise ValueError("Ufuncify doesn't support InOutArguments")
else:
py_in.append(arg)
return py_in, py_out
@cacheit
@doctest_depends_on(exe=('f2py', 'gfortran', 'gcc'), modules=('numpy',))
def ufuncify(args, expr, language=None, backend='numpy', tempdir=None,
flags=None, verbose=False, helpers=None):
"""Generates a binary function that supports broadcasting on numpy arrays.
Parameters
----------
args : iterable
Either a Symbol or an iterable of symbols. Specifies the argument
sequence for the function.
expr
A SymPy expression that defines the element wise operation.
language : string, optional
If supplied, (options: 'C' or 'F95'), specifies the language of the
generated code. If ``None`` [default], the language is inferred based
upon the specified backend.
backend : string, optional
Backend used to wrap the generated code. Either 'numpy' [default],
'cython', or 'f2py'.
tempdir : string, optional
Path to directory for temporary files. If this argument is supplied,
the generated code and the wrapper input files are left intact in the
specified path.
flags : iterable, optional
Additional option flags that will be passed to the backend
verbose : bool, optional
If True, autowrap will not mute the command line backends. This can be
helpful for debugging.
helpers : iterable, optional
    Used to define auxiliary expressions needed for the main expr. If the
    main expression needs to call a specialized function it should be put
    in the ``helpers`` iterable. Autowrap will then make sure that the
    compiled main expression can link to the helper routine. Items should
    be tuples with (<function_name>, <sympy_expression>, <arguments>). It
    is mandatory to supply an argument sequence to helper routines.
Note
----
The default backend ('numpy') will create actual instances of
    ``numpy.ufunc``. These support n-dimensional broadcasting and implicit type
conversion. Use of the other backends will result in a "ufunc-like"
function, which requires equal length 1-dimensional arrays for all
arguments, and will not perform any type conversions.
References
----------
[1] http://docs.scipy.org/doc/numpy/reference/ufuncs.html
Examples
========
>>> from sympy.utilities.autowrap import ufuncify
>>> from sympy.abc import x, y
>>> import numpy as np
>>> f = ufuncify((x, y), y + x**2)
>>> type(f)
numpy.ufunc
>>> f([1, 2, 3], 2)
array([ 3., 6., 11.])
>>> f(np.arange(5), 3)
array([ 3., 4., 7., 12., 19.])
For the F2Py and Cython backends, inputs are required to be equal length
1-dimensional arrays. The F2Py backend will perform type conversion, but
the Cython backend will error if the inputs are not of the expected type.
>>> f_fortran = ufuncify((x, y), y + x**2, backend='F2Py')
>>> f_fortran(1, 2)
3
    >>> f_fortran(np.array([1, 2, 3]), np.array([1.0, 2.0, 3.0]))
array([2., 6., 12.])
>>> f_cython = ufuncify((x, y), y + x**2, backend='Cython')
>>> f_cython(1, 2)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: Argument '_x' has incorrect type (expected numpy.ndarray, got int)
    >>> f_cython(np.array([1.0]), np.array([2.0]))
array([ 3.])
"""
if isinstance(args, Symbol):
args = (args,)
else:
args = tuple(args)
if language:
_validate_backend_language(backend, language)
else:
language = _infer_language(backend)
helpers = helpers if helpers else ()
flags = flags if flags else ()
if backend.upper() == 'NUMPY':
# maxargs is set by numpy compile-time constant NPY_MAXARGS
# If a future version of numpy modifies or removes this restriction
# this variable should be changed or removed
maxargs = 32
helps = []
        for name_h, expr_h, args_h in helpers:
            helps.append(make_routine(name_h, expr_h, args_h))
code_wrapper = UfuncifyCodeWrapper(CCodeGen("ufuncify"), tempdir,
flags, verbose)
if not isinstance(expr, (list, tuple)):
expr = [expr]
if len(expr) == 0:
raise ValueError('Expression iterable has zero length')
if (len(expr) + len(args)) > maxargs:
raise ValueError('Cannot create ufunc with more than {0} total arguments: got {1} in, {2} out'
.format(maxargs, len(args), len(expr)))
routines = [make_routine('autofunc{}'.format(idx), exprx, args) for idx, exprx in enumerate(expr)]
return code_wrapper.wrap_code(routines, helpers=helps)
else:
# Dummies are used for all added expressions to prevent name clashes
# within the original expression.
y = IndexedBase(Dummy())
m = Dummy(integer=True)
i = Idx(Dummy(integer=True), m)
f = implemented_function(Dummy().name, Lambda(args, expr))
# For each of the args create an indexed version.
indexed_args = [IndexedBase(Dummy(str(a))) for a in args]
# Order the arguments (out, args, dim)
args = [y] + indexed_args + [m]
args_with_indices = [a[i] for a in indexed_args]
return autowrap(Eq(y[i], f(*args_with_indices)), language, backend,
tempdir, args, flags, verbose, helpers)
# ==== File: /1-Follow up in Code Interview/Optional/32. Minimum Window Substring.py (repo: LingHsiLiu/Algorithm2) ====
# 32. Minimum Window Substring
# Given a string source and a string target, find the minimum window in source which will contain all the characters in target.
# Example
# For source = "ADOBECODEBANC", target = "ABC", the minimum window is "BANC"
# Challenge
# Can you do it in time complexity O(n) ?
# Clarification
# Should the characters in minimum window has the same order in target?
# Not necessary.
# Notice
# If there is no such window in source that covers all characters in target, return the empty string "".
# If there are multiple such windows, you are guaranteed that there will always be only one unique minimum window in source.
# The target string may contain duplicate characters, the minimum window should cover all characters including the duplicate characters in target.
class Solution:
"""
@param source : A string
@param target: A string
    @return: A string denoting the minimum window; return "" if there is no such string
"""
def minWindow(self, source , target):
# write your code here
if source is None:
return ""
targetHash = self.getTargetHash(target)
targetUniqueChars = len(targetHash)
matchedUniqueChars = 0
hash = {}
n = len(source)
j = 0
minLength = n + 1
minWindowString = ""
for i in range(n):
while j < n and matchedUniqueChars < targetUniqueChars:
if source[j] in targetHash:
hash[source[j]] = hash.get(source[j], 0) + 1
if hash[source[j]] == targetHash[source[j]]:
matchedUniqueChars += 1
j += 1
if j - i < minLength and matchedUniqueChars == targetUniqueChars:
minLength = j - i
minWindowString = source[i:j]
if source[i] in targetHash:
                if hash[source[i]] == targetHash[source[i]]:
matchedUniqueChars -= 1
hash[source[i]] -= 1
return minWindowString
def getTargetHash(self, target):
hash = {}
for c in target:
hash[c] = hash.get(c, 0) + 1
return hash
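
if __name__ == '__main__':
    # Illustrative check using the example from the problem statement.
    print(Solution().minWindow("ADOBECODEBANC", "ABC"))  # expected: "BANC"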
# ==== File: /django_facebook/views.py (repo: fogcitymarathoner/djfb, BSD-3-Clause) ====
from django.conf import settings
from django.contrib import messages
from django.http import Http404, HttpResponse
from django.shortcuts import redirect, render_to_response
from django.template.context import RequestContext
from django.utils.translation import ugettext as _
from django.views.decorators.csrf import csrf_exempt
from django_facebook import exceptions as facebook_exceptions, \
settings as facebook_settings
from django_facebook.connect import CONNECT_ACTIONS, connect_user
from django_facebook.decorators import facebook_required_lazy
from django_facebook.utils import next_redirect, get_registration_backend, \
to_bool, error_next_redirect, get_instance_for
from open_facebook import exceptions as open_facebook_exceptions
from open_facebook.utils import send_warning
import logging
logger = logging.getLogger(__name__)
@csrf_exempt
@facebook_required_lazy(extra_params=dict(facebook_login='1'))
def connect(request, graph):
'''
Exception and validation functionality around the _connect view
Separated this out from _connect to preserve readability
Don't bother reading this code, skip to _connect for the bit you're interested in :)
'''
facebook_login = to_bool(request.REQUEST.get('facebook_login'))
context = RequestContext(request)
# validation to ensure the context processor is enabled
if not context.get('FACEBOOK_APP_ID'):
message = 'Please specify a Facebook app id and ensure the context processor is enabled'
raise ValueError(message)
# hide the connect page, convenient for testing with new users in production though
if not facebook_login and not settings.DEBUG and facebook_settings.FACEBOOK_HIDE_CONNECT_TEST:
raise Http404('not showing the connect page')
try:
response = _connect(request, facebook_login, graph)
except open_facebook_exceptions.FacebookUnreachable, e:
# often triggered when Facebook is slow
warning_format = u'%s, often caused by Facebook slowdown, error %s'
warn_message = warning_format % (type(e), e.message)
send_warning(warn_message, e=e)
response = error_next_redirect(request,
additional_params=dict(
fb_error_or_cancel=1)
)
return response
def _connect(request, facebook_login, graph):
'''
Handles the view logic around connect user
- (if authenticated) connect the user
- login
- register
    We are already covered by the facebook_required_lazy decorator,
    so we know we either have a graph and permissions, or the user denied
    the oAuth dialog.
'''
backend = get_registration_backend()
context = RequestContext(request)
if facebook_login:
logger.info('trying to connect using Facebook')
if graph:
logger.info('found a graph object')
converter = get_instance_for('user_conversion', graph)
authenticated = converter.is_authenticated()
# Defensive programming :)
if not authenticated:
                raise ValueError("didn't expect this flow")
logger.info('Facebook is authenticated')
facebook_data = converter.facebook_profile_data()
# either, login register or connect the user
try:
action, user = connect_user(request)
logger.info('Django facebook performed action: %s', action)
except facebook_exceptions.IncompleteProfileError, e:
# show them a registration form to add additional data
warning_format = u'Incomplete profile data encountered with error %s'
warn_message = warning_format % e.message
send_warning(warn_message, e=e,
facebook_data=facebook_data)
context['facebook_mode'] = True
context['form'] = e.form
return render_to_response(
facebook_settings.FACEBOOK_REGISTRATION_TEMPLATE,
context_instance=context,
)
except facebook_exceptions.AlreadyConnectedError, e:
user_ids = [u.user_id for u in e.users]
ids_string = ','.join(map(str, user_ids))
return error_next_redirect(
request,
additional_params=dict(already_connected=ids_string))
if action is CONNECT_ACTIONS.CONNECT:
# connect means an existing account was attached to facebook
messages.info(request, _("You have connected your account "
"to %s's facebook profile") % facebook_data['name'])
elif action is CONNECT_ACTIONS.REGISTER:
# hook for tying in specific post registration functionality
response = backend.post_registration_redirect(
request, user)
# compatibility for Django registration backends which return redirect tuples instead of a response
if not isinstance(response, HttpResponse):
to, args, kwargs = response
response = redirect(to, *args, **kwargs)
return response
else:
# the user denied the request
return error_next_redirect(
request,
additional_params=dict(fb_error_or_cancel='1'))
    # for CONNECT and LOGIN we simply redirect to the next page
return next_redirect(request, default=facebook_settings.FACEBOOK_LOGIN_DEFAULT_REDIRECT)
return render_to_response('django_facebook/connect.html', context)
def disconnect(request):
'''
Removes Facebook from the users profile
And redirects to the specified next page
'''
if request.method == 'POST':
messages.info(
request, _("You have disconnected your Facebook profile."))
profile = request.user.get_profile()
profile.disconnect_facebook()
profile.save()
response = next_redirect(request)
return response
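
# Example wiring (a sketch; django_facebook ships its own urlconf, and the
# exact patterns may differ):
#
#   from django.conf.urls import patterns, url
#   urlpatterns = patterns('',
#       url(r'^connect/$', 'django_facebook.views.connect',
#           name='facebook_connect'),
#       url(r'^disconnect/$', 'django_facebook.views.disconnect',
#           name='facebook_disconnect'),
#   )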
# ==== File: /Chapter_5_code/build/ros_robotics/catkin_generated/generate_cached_setup.py (repo: crepuscularlight/ROSbyExample, MIT) ====
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/melodic/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/melodic/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in '/home/liudiyang1998/Git/ROS-Robotics-By-Example/Chapter_5_code/devel;/home/liudiyang1998/Git/ROS-Robotics-By-Example/Chapter_3_code/devel;/opt/ros/melodic'.split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/liudiyang1998/Git/ROS-Robotics-By-Example/Chapter_5_code/devel/.private/ros_robotics/env.sh')
output_filename = '/home/liudiyang1998/Git/ROS-Robotics-By-Example/Chapter_5_code/build/ros_robotics/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
# print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
# ==== File: /product/migrations/0002_auto_20171118_1436.py (repo: mamun1980/innstal) ====
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-18 14:36
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('product', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.AddField(
model_name='productvisited',
name='visitor',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='producttype',
name='category',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='product.ProductCategory'),
),
migrations.AddField(
model_name='productreview',
name='product',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='product.Product'),
),
migrations.AddField(
model_name='productreview',
name='writer',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='product',
name='business',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='product',
name='product_brand',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='product.ProductBrand'),
),
migrations.AddField(
model_name='product',
name='product_category',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='product.ProductCategory'),
),
migrations.AddField(
model_name='product',
name='product_type',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='product.ProductType'),
),
]
# ==== File: /slbo/envs/mujoco/ant_env.py (repo: suen049/AdMRL, MIT) ====
import numpy as np
from rllab.envs.mujoco import ant_env
from rllab.envs.base import Step
from slbo.envs import BaseModelBasedEnv
class AntEnv(ant_env.AntEnv, BaseModelBasedEnv):
def get_current_obs(self):
return np.concatenate([
self.model.data.qpos.flat, # 15
self.model.data.qvel.flat, # 14
# np.clip(self.model.data.cfrc_ext, -1, 1).flat, # 84
self.get_body_xmat("torso").flat, # 9
self.get_body_com("torso"), # 9
self.get_body_comvel("torso"), # 3
]).reshape(-1)
def step(self, action):
self.forward_dynamics(action)
comvel = self.get_body_comvel("torso")
forward_reward = comvel[0]
lb, ub = self.action_bounds
scaling = (ub - lb) * 0.5
ctrl_cost = 0.5 * 1e-2 * np.sum(np.square(action / scaling))
contact_cost = 0.
# contact_cost = 0.5 * 1e-3 * np.sum(
# np.square(np.clip(self.model.data.cfrc_ext, -1, 1))),
survive_reward = 0.05
reward = forward_reward - ctrl_cost - contact_cost + survive_reward
state = self._state
notdone = np.isfinite(state).all() and state[2] >= 0.2 and state[2] <= 1.0
done = not notdone
ob = self.get_current_obs()
return Step(ob, float(reward), done)
def mb_step(self, states: np.ndarray, actions: np.ndarray, next_states: np.ndarray):
comvel = next_states[..., -3:]
forward_reward = comvel[..., 0]
lb, ub = self.action_bounds
scaling = (ub - lb) * 0.5
ctrl_cost = 0.5 * 1e-2 * np.sum(np.square(actions / scaling), axis=-1)
contact_cost = 0.
# contact_cost = 0.5 * 1e-3 * np.sum(
# np.square(np.clip(self.model.data.cfrc_ext, -1, 1))),
survive_reward = 0.05
reward = forward_reward - ctrl_cost - contact_cost + survive_reward
notdone = np.all([next_states[..., 2] >= 0.2, next_states[..., 2] <= 1.0], axis=0)
return reward, 1. - notdone
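
# Usage sketch (shapes illustrative): given batched transitions predicted by a
# learned dynamics model, rewards and termination flags can be computed
# without touching the simulator:
#   rewards, dones = env.mb_step(states, actions, next_states)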
# ==== File: /StockNest/stock_backend/migrations/0004_company_maxval.py (repo: pl-lee/Stock-Price-Forecasting-Using-Artificial-Intelligence, MIT) ====
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-26 13:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('stock_backend', '0003_auto_20170726_0910'),
]
operations = [
migrations.AddField(
model_name='company',
name='maxVal',
field=models.FloatField(default=0),
),
]
# ==== File: /morpfw/crud/util.py (repo: morpframework/morpfw, Apache-2.0) ====
import re
from morepath.publish import resolve_model as _resolve_model
from ..interfaces import ISchema
import jsl
import dataclasses
from copy import copy
import typing
from datetime import datetime, date
def resolve_model(request):
newreq = request.app.request_class(
request.environ.copy(), request.app, path_info=request.path
)
context = _resolve_model(newreq)
context.request = request
return context
_marker = object()
def generate_default(schema):
data = {}
if isinstance(schema, jsl.DocumentField):
schema = schema.document_cls
for n, f in schema._fields.items():
if isinstance(f, jsl.DocumentField):
data[n] = generate_default(f)
else:
data[n] = f.get_default()
if data[n] is None:
if isinstance(f, jsl.StringField):
data[n] = None
elif isinstance(f, jsl.IntField) or isinstance(f, jsl.NumberField):
data[n] = None
elif isinstance(f, jsl.DictField):
data[n] = {}
elif isinstance(f, jsl.ArrayField):
data[n] = []
return data
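
# Example (a sketch, assuming a simple jsl document):
#
#   class Person(jsl.Document):
#       name = jsl.StringField()
#       tags = jsl.ArrayField()
#
#   generate_default(Person)  # -> {'name': None, 'tags': []}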
# ==== File: /tensorflow_probability/python/experimental/vi/surrogate_posteriors.py (repo: beoy/probability, Apache-2.0) ====
# Copyright 2019 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utilities for constructing surrogate posteriors."""
from __future__ import absolute_import
from __future__ import division
# [internal] enable type annotations
from __future__ import print_function
import collections
import functools
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import util as tfp_util
from tensorflow_probability.python.bijectors import chain
from tensorflow_probability.python.bijectors import identity
from tensorflow_probability.python.bijectors import invert
from tensorflow_probability.python.bijectors import joint_map
from tensorflow_probability.python.bijectors import reshape
from tensorflow_probability.python.bijectors import restructure
from tensorflow_probability.python.bijectors import scale as scale_lib
from tensorflow_probability.python.bijectors import scale_matvec_linear_operator
from tensorflow_probability.python.bijectors import shift
from tensorflow_probability.python.bijectors import sigmoid
from tensorflow_probability.python.bijectors import softplus
from tensorflow_probability.python.bijectors import split
from tensorflow_probability.python.distributions import beta
from tensorflow_probability.python.distributions import half_normal
from tensorflow_probability.python.distributions import independent
from tensorflow_probability.python.distributions import joint_distribution
from tensorflow_probability.python.distributions import joint_distribution_auto_batched
from tensorflow_probability.python.distributions import joint_distribution_coroutine
from tensorflow_probability.python.distributions import joint_distribution_util
from tensorflow_probability.python.distributions import normal
from tensorflow_probability.python.distributions import sample
from tensorflow_probability.python.distributions import transformed_distribution
from tensorflow_probability.python.distributions import truncated_normal
from tensorflow_probability.python.distributions import uniform
from tensorflow_probability.python.experimental.vi.util import trainable_linear_operators
from tensorflow_probability.python.internal import distribution_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import samplers
from tensorflow.python.util import deprecation # pylint: disable=g-direct-tensorflow-import
from tensorflow.python.util import nest # pylint: disable=g-direct-tensorflow-import
Root = joint_distribution_coroutine.JointDistributionCoroutine.Root
_NON_STATISTICAL_PARAMS = [
'name', 'validate_args', 'allow_nan_stats', 'experimental_use_kahan_sum',
'reinterpreted_batch_ndims', 'dtype'
]
_NON_TRAINABLE_PARAMS = ['low', 'high']
ASVIParameters = collections.namedtuple(
'ASVIParameters', ['prior_weight', 'mean_field_parameter'])
def build_trainable_location_scale_distribution(initial_loc,
initial_scale,
event_ndims,
distribution_fn=normal.Normal,
validate_args=False,
name=None):
"""Builds a variational distribution from a location-scale family.
Args:
initial_loc: Float `Tensor` initial location.
initial_scale: Float `Tensor` initial scale.
event_ndims: Integer `Tensor` number of event dimensions in `initial_loc`.
distribution_fn: Optional constructor for a `tfd.Distribution` instance
in a location-scale family. This should have signature `dist =
distribution_fn(loc, scale, validate_args)`.
Default value: `tfd.Normal`.
validate_args: Python `bool`. Whether to validate input with asserts. This
imposes a runtime cost. If `validate_args` is `False`, and the inputs are
invalid, correct behavior is not guaranteed.
Default value: `False`.
name: Python `str` name prefixed to ops created by this function.
Default value: `None` (i.e.,
'build_trainable_location_scale_distribution').
Returns:
posterior_dist: A `tfd.Distribution` instance.
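
  ### Examples

  A trainable diagonal-normal distribution over vectors of length 3 (a
  sketch; the initial values are arbitrary):

  ```python
  dist = tfp.experimental.vi.build_trainable_location_scale_distribution(
      initial_loc=tf.zeros([3]),
      initial_scale=1e-2,
      event_ndims=1)
  # ==> `tfd.Independent(tfd.Normal(...), 1)` with a trainable `loc` and a
  #     softplus-parameterized `scale`.
  ```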
"""
with tf.name_scope(name or 'build_trainable_location_scale_distribution'):
dtype = dtype_util.common_dtype([initial_loc, initial_scale],
dtype_hint=tf.float32)
initial_loc = initial_loc * tf.ones(tf.shape(initial_scale), dtype=dtype)
initial_scale = initial_scale * tf.ones_like(initial_loc)
loc = tf.Variable(initial_value=initial_loc, name='loc')
scale = tfp_util.TransformedVariable(
initial_scale, softplus.Softplus(), name='scale')
posterior_dist = distribution_fn(loc=loc, scale=scale,
validate_args=validate_args)
# Ensure the distribution has the desired number of event dimensions.
static_event_ndims = tf.get_static_value(event_ndims)
if static_event_ndims is None or static_event_ndims > 0:
posterior_dist = independent.Independent(
posterior_dist,
reinterpreted_batch_ndims=event_ndims,
validate_args=validate_args)
return posterior_dist
def _get_event_shape_shallow_structure(event_shape):
"""Gets shallow structure, treating lists of ints at the leaves as atomic."""
def _not_list_of_ints(s):
if isinstance(s, list) or isinstance(s, tuple):
return not all(isinstance(x, int) for x in s)
return True
return nest.get_traverse_shallow_structure(_not_list_of_ints, event_shape)
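# For example, the structure `[[2, 3], [4]]` is treated as two atomic event
# shapes, whereas a flat list of ints such as `[2, 3]` is a single event shape.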
# Default constructors for `build_factored_surrogate_posterior`.
_sample_uniform_initial_loc = functools.partial(
samplers.uniform, minval=-2., maxval=2., dtype=tf.float32)
_build_trainable_normal_dist = functools.partial(
build_trainable_location_scale_distribution,
distribution_fn=normal.Normal)
@deprecation.deprecated(
'2021-07-01',
'`build_factored_surrogate_posterior` is deprecated. Use '
'`build_affine_surrogate_posterior` with `operators="diag"` instead.')
@deprecation.deprecated_args(
'2021-03-15',
'`constraining_bijectors` is deprecated, use `bijector` instead',
'constraining_bijectors')
def build_factored_surrogate_posterior(
event_shape=None,
bijector=None,
constraining_bijectors=None,
initial_unconstrained_loc=_sample_uniform_initial_loc,
initial_unconstrained_scale=1e-2,
trainable_distribution_fn=_build_trainable_normal_dist,
seed=None,
validate_args=False,
name=None):
"""Builds a joint variational posterior that factors over model variables.
By default, this method creates an independent trainable Normal distribution
for each variable, transformed using a bijector (if provided) to
match the support of that variable. This makes extremely strong
assumptions about the posterior: that it is approximately normal (or
transformed normal), and that all model variables are independent.
Args:
event_shape: `Tensor` shape, or nested structure of `Tensor` shapes,
specifying the event shape(s) of the posterior variables.
bijector: Optional `tfb.Bijector` instance, or nested structure of such
instances, defining support(s) of the posterior variables. The structure
must match that of `event_shape` and may contain `None` values. A
posterior variable will be modeled as
`tfd.TransformedDistribution(underlying_dist, bijector)` if a
corresponding constraining bijector is specified, otherwise it is modeled
as supported on the unconstrained real line.
constraining_bijectors: Deprecated alias for `bijector`.
initial_unconstrained_loc: Optional Python `callable` with signature
`tensor = initial_unconstrained_loc(shape, seed)` used to sample
real-valued initializations for the unconstrained representation of each
variable. May alternately be a nested structure of
`Tensor`s, giving specific initial locations for each variable; these
must have structure matching `event_shape` and shapes determined by the
inverse image of `event_shape` under `bijector`, which may optionally be
prefixed with a common batch shape.
Default value: `functools.partial(tf.random.stateless_uniform,
minval=-2., maxval=2., dtype=tf.float32)`.
initial_unconstrained_scale: Optional scalar float `Tensor` initial
scale for the unconstrained distributions, or a nested structure of
`Tensor` initial scales for each variable.
Default value: `1e-2`.
trainable_distribution_fn: Optional Python `callable` with signature
`trainable_dist = trainable_distribution_fn(initial_loc, initial_scale,
event_ndims, validate_args)`. This is called for each model variable to
build the corresponding factor in the surrogate posterior. It is expected
that the distribution returned is supported on unconstrained real values.
Default value: `functools.partial(
tfp.experimental.vi.build_trainable_location_scale_distribution,
distribution_fn=tfd.Normal)`, i.e., a trainable Normal distribution.
seed: Python integer to seed the random number generator. This is used
only when `initial_loc` is not specified.
validate_args: Python `bool`. Whether to validate input with asserts. This
imposes a runtime cost. If `validate_args` is `False`, and the inputs are
invalid, correct behavior is not guaranteed.
Default value: `False`.
name: Python `str` name prefixed to ops created by this function.
Default value: `None` (i.e., 'build_factored_surrogate_posterior').
Returns:
surrogate_posterior: A `tfd.Distribution` instance whose samples have
shape and structure matching that of `event_shape` or `initial_loc`.
### Examples
Consider a Gamma model with unknown parameters, expressed as a joint
Distribution:
```python
Root = tfd.JointDistributionCoroutine.Root
def model_fn():
concentration = yield Root(tfd.Exponential(1.))
rate = yield Root(tfd.Exponential(1.))
y = yield tfd.Sample(tfd.Gamma(concentration=concentration, rate=rate),
sample_shape=4)
model = tfd.JointDistributionCoroutine(model_fn)
```
Let's use variational inference to approximate the posterior over the
data-generating parameters for some observed `y`. We'll build a
surrogate posterior distribution by specifying the shapes of the latent
`rate` and `concentration` parameters, and that both are constrained to
be positive.
```python
surrogate_posterior = tfp.experimental.vi.build_factored_surrogate_posterior(
event_shape=model.event_shape_tensor()[:-1], # Omit the observed `y`.
      bijector=[tfb.Softplus(),   # Concentration is positive.
                tfb.Softplus()])  # Rate is positive.
```
This creates a trainable joint distribution, defined by variables in
`surrogate_posterior.trainable_variables`. We use `fit_surrogate_posterior`
to fit this distribution by minimizing a divergence to the true posterior.
```python
y = [0.2, 0.5, 0.3, 0.7]
losses = tfp.vi.fit_surrogate_posterior(
lambda rate, concentration: model.log_prob([rate, concentration, y]),
surrogate_posterior=surrogate_posterior,
num_steps=100,
optimizer=tf.optimizers.Adam(0.1),
sample_size=10)
# After optimization, samples from the surrogate will approximate
# samples from the true posterior.
samples = surrogate_posterior.sample(100)
posterior_mean = [tf.reduce_mean(x) for x in samples] # mean ~= [1.1, 2.1]
posterior_std = [tf.math.reduce_std(x) for x in samples] # std ~= [0.3, 0.8]
```
If we wanted to initialize the optimization at a specific location, we can
specify one when we build the surrogate posterior. This function requires the
initial location to be specified in *unconstrained* space; we do this by
inverting the constraining bijectors (note this section also demonstrates the
creation of a dict-structured model).
```python
initial_loc = {'concentration': 0.4, 'rate': 0.2}
  bijector={'concentration': tfb.Softplus(),   # Concentration is positive.
            'rate': tfb.Softplus()}            # Rate is positive.
  initial_unconstrained_loc = tf.nest.map_structure(
      lambda b, x: b.inverse(x) if b is not None else x, bijector, initial_loc)
  surrogate_posterior = tfp.experimental.vi.build_factored_surrogate_posterior(
    event_shape=tf.nest.map_structure(tf.shape, initial_loc),
    bijector=bijector,
    initial_unconstrained_loc=initial_unconstrained_loc,
    initial_unconstrained_scale=1e-4)
initial_unconstrained_scale=1e-4)
```
"""
with tf.name_scope(name or 'build_factored_surrogate_posterior'):
bijector = deprecation.deprecated_argument_lookup(
'bijector', bijector, 'constraining_bijectors', constraining_bijectors)
seed = tfp_util.SeedStream(seed, salt='build_factored_surrogate_posterior')
# Convert event shapes to Tensors.
shallow_structure = _get_event_shape_shallow_structure(event_shape)
event_shape = nest.map_structure_up_to(
shallow_structure, lambda s: tf.convert_to_tensor(s, dtype=tf.int32),
event_shape)
if nest.is_nested(bijector):
bijector = nest.map_structure(
lambda b: identity.Identity() if b is None else b,
bijector)
# Support mismatched nested structures for backwards compatibility (e.g.
# non-nested `event_shape` and a single-element list of `bijector`s).
bijector = nest.pack_sequence_as(event_shape, nest.flatten(bijector))
event_space_bijector = joint_map.JointMap(
bijector, validate_args=validate_args)
else:
event_space_bijector = bijector
if event_space_bijector is None:
unconstrained_event_shape = event_shape
else:
unconstrained_event_shape = (
event_space_bijector.inverse_event_shape_tensor(event_shape))
# Construct initial locations for the internal unconstrained dists.
if callable(initial_unconstrained_loc): # Sample random initialization.
initial_unconstrained_loc = nest.map_structure(
lambda s: initial_unconstrained_loc(shape=s, seed=seed()),
unconstrained_event_shape)
if not nest.is_nested(initial_unconstrained_scale):
initial_unconstrained_scale = nest.map_structure(
lambda _: initial_unconstrained_scale,
unconstrained_event_shape)
# Extract the rank of each event, so that we build distributions with the
# correct event shapes.
unconstrained_event_ndims = nest.map_structure(
ps.rank_from_shape,
unconstrained_event_shape)
# Build the component surrogate posteriors.
unconstrained_distributions = nest.map_structure_up_to(
unconstrained_event_shape,
lambda loc, scale, ndims: trainable_distribution_fn( # pylint: disable=g-long-lambda
loc, scale, ndims, validate_args=validate_args),
initial_unconstrained_loc,
initial_unconstrained_scale,
unconstrained_event_ndims)
base_distribution = (
joint_distribution_util.independent_joint_distribution_from_structure(
unconstrained_distributions, validate_args=validate_args))
if event_space_bijector is None:
return base_distribution
return transformed_distribution.TransformedDistribution(
base_distribution, event_space_bijector)
def _as_trainable_family(distribution):
"""Substitutes prior distributions with more easily trainable ones."""
with tf.name_scope('as_trainable_family'):
if isinstance(distribution, half_normal.HalfNormal):
return truncated_normal.TruncatedNormal(
loc=0.,
scale=distribution.scale,
low=0.,
high=distribution.scale * 10.)
elif isinstance(distribution, uniform.Uniform):
return shift.Shift(distribution.low)(
scale_lib.Scale(distribution.high - distribution.low)(beta.Beta(
concentration0=tf.ones(
distribution.event_shape_tensor(), dtype=distribution.dtype),
concentration1=1.)))
else:
return distribution
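# For example, `tfd.Uniform(low=-1., high=1.)` is rewritten (schematically) as
# `tfb.Shift(-1.)(tfb.Scale(2.)(tfd.Beta(concentration0=1., concentration1=1.)))`,
# which has the same support but more easily trainable parameters.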
def _make_asvi_trainable_variables(prior,
mean_field=False,
initial_prior_weight=0.5):
"""Generates parameter dictionaries given a prior distribution and list."""
with tf.name_scope('make_asvi_trainable_variables'):
param_dicts = []
prior_dists = prior._get_single_sample_distributions() # pylint: disable=protected-access
for dist in prior_dists:
original_dist = dist.distribution if isinstance(dist, Root) else dist
substituted_dist = _as_trainable_family(original_dist)
# Grab the base distribution if it exists
try:
actual_dist = substituted_dist.distribution
except AttributeError:
actual_dist = substituted_dist
new_params_dict = {}
# Build trainable ASVI representation for each distribution's parameters.
parameter_properties = actual_dist.parameter_properties(
dtype=actual_dist.dtype)
if isinstance(original_dist, sample.Sample):
posterior_batch_shape = ps.concat([
actual_dist.batch_shape_tensor(),
distribution_util.expand_to_vector(original_dist.sample_shape)
], axis=0)
else:
posterior_batch_shape = actual_dist.batch_shape_tensor()
for param, value in actual_dist.parameters.items():
if param in (_NON_STATISTICAL_PARAMS +
_NON_TRAINABLE_PARAMS) or value is None:
continue
actual_event_shape = parameter_properties[param].shape_fn(
actual_dist.event_shape_tensor())
try:
bijector = parameter_properties[
param].default_constraining_bijector_fn()
except NotImplementedError:
bijector = identity.Identity()
if mean_field:
prior_weight = None
else:
unconstrained_ones = tf.ones(
shape=ps.concat([
posterior_batch_shape,
bijector.inverse_event_shape_tensor(
actual_event_shape)
], axis=0),
dtype=tf.convert_to_tensor(value).dtype)
prior_weight = tfp_util.TransformedVariable(
initial_prior_weight * unconstrained_ones,
bijector=sigmoid.Sigmoid(),
name='prior_weight/{}/{}'.format(dist.name, param))
# If the prior distribution was a tfd.Sample wrapping a base
# distribution, we want to give every single sample in the prior its
# own lambda and alpha value (rather than having a single lambda and
# alpha).
if isinstance(original_dist, sample.Sample):
value = tf.reshape(
value,
ps.concat([
actual_dist.batch_shape_tensor(),
ps.ones(ps.rank_from_shape(original_dist.sample_shape)),
actual_event_shape
],
axis=0))
value = tf.broadcast_to(
value,
ps.concat([posterior_batch_shape, actual_event_shape], axis=0))
new_params_dict[param] = ASVIParameters(
prior_weight=prior_weight,
mean_field_parameter=tfp_util.TransformedVariable(
value,
bijector=bijector,
name='mean_field_parameter/{}/{}'.format(dist.name, param)))
param_dicts.append(new_params_dict)
return param_dicts
# TODO(kateslin): Add support for models with prior+likelihood written as
# a single JointDistribution.
def build_asvi_surrogate_posterior(prior,
mean_field=False,
initial_prior_weight=0.5,
name=None):
"""Builds a structured surrogate posterior inspired by conjugate updating.
ASVI, or Automatic Structured Variational Inference, was proposed by
Ambrogioni et al. (2020) [1] as a method of automatically constructing a
surrogate posterior with the same structure as the prior. It does this by
reparameterizing the variational family of the surrogate posterior by
structuring each parameter according to the equation
```none
prior_weight * prior_parameter + (1 - prior_weight) * mean_field_parameter
```
In this equation, `prior_parameter` is a vector of prior parameters and
`mean_field_parameter` is a vector of trainable parameters with the same
domain as `prior_parameter`. `prior_weight` is a vector of learnable
parameters where `0. <= prior_weight <= 1.`. When `prior_weight =
0`, the surrogate posterior will be a mean-field surrogate, and when
`prior_weight = 1.`, the surrogate posterior will be the prior. This convex
combination equation, inspired by conjugacy in exponential families, thus
allows the surrogate posterior to balance between the structure of the prior
and the structure of a mean-field approximation.
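  For instance (illustrative numbers): a `Normal` prior parameter `loc = 2.`
  combined with a trainable mean-field `loc` of `0.` under
  `prior_weight = 0.25` yields a surrogate `loc` of
  `0.25 * 2. + 0.75 * 0. = 0.5`.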
Args:
prior: tfd.JointDistribution instance of the prior.
mean_field: Optional Python boolean. If `True`, creates a degenerate
surrogate distribution in which all variables are independent,
ignoring the prior dependence structure. Default value: `False`.
initial_prior_weight: Optional float value (either static or tensor value)
on the interval [0, 1]. A larger value creates an initial surrogate
distribution with more dependence on the prior structure. Default value:
`0.5`.
name: Optional string. Default value: `build_asvi_surrogate_posterior`.
Returns:
surrogate_posterior: A `tfd.JointDistributionCoroutineAutoBatched` instance
whose samples have shape and structure matching that of `prior`.
Raises:
TypeError: The `prior` argument cannot be a nested `JointDistribution`.
### Examples
Consider a Brownian motion model expressed as a JointDistribution:
```python
prior_loc = 0.
innovation_noise = .1
def model_fn():
new = yield tfd.Normal(loc=prior_loc, scale=innovation_noise)
for i in range(4):
new = yield tfd.Normal(loc=new, scale=innovation_noise)
prior = tfd.JointDistributionCoroutineAutoBatched(model_fn)
```
Let's use variational inference to approximate the posterior. We'll build a
surrogate posterior distribution by feeding in the prior distribution.
```python
  surrogate_posterior = tfp.experimental.vi.build_asvi_surrogate_posterior(
      prior)
```
This creates a trainable joint distribution, defined by variables in
`surrogate_posterior.trainable_variables`. We use `fit_surrogate_posterior`
to fit this distribution by minimizing a divergence to the true posterior.
```python
losses = tfp.vi.fit_surrogate_posterior(
target_log_prob_fn,
surrogate_posterior=surrogate_posterior,
num_steps=100,
optimizer=tf.optimizers.Adam(0.1),
sample_size=10)
# After optimization, samples from the surrogate will approximate
# samples from the true posterior.
samples = surrogate_posterior.sample(100)
posterior_mean = [tf.reduce_mean(x) for x in samples]
posterior_std = [tf.math.reduce_std(x) for x in samples]
```
#### References
[1]: Luca Ambrogioni, Max Hinne, Marcel van Gerven. Automatic structured
variational inference. _arXiv preprint arXiv:2002.00643_, 2020
https://arxiv.org/abs/2002.00643
"""
with tf.name_scope(name or 'build_asvi_surrogate_posterior'):
param_dicts = _make_asvi_trainable_variables(
prior=prior,
mean_field=mean_field,
initial_prior_weight=initial_prior_weight)
def posterior_generator():
prior_gen = prior._model_coroutine() # pylint: disable=protected-access
dist = next(prior_gen)
i = 0
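      # `i` indexes `param_dicts`, which is aligned one-to-one with the
      # prior's single-sample distributions in generation order.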
try:
while True:
original_dist = dist.distribution if isinstance(dist, Root) else dist
if isinstance(original_dist, joint_distribution.JointDistribution):
# TODO(kateslin): Build inner JD surrogate in
# _make_asvi_trainable_variables to avoid rebuilding variables.
raise TypeError(
'Argument `prior` cannot be a nested `JointDistribution`.')
else:
original_dist = _as_trainable_family(original_dist)
try:
actual_dist = original_dist.distribution
except AttributeError:
actual_dist = original_dist
dist_params = actual_dist.parameters
temp_params_dict = {}
for param, value in dist_params.items():
if param in (_NON_STATISTICAL_PARAMS +
_NON_TRAINABLE_PARAMS) or value is None:
temp_params_dict[param] = value
else:
prior_weight = param_dicts[i][param].prior_weight
mean_field_parameter = param_dicts[i][
param].mean_field_parameter
if mean_field:
temp_params_dict[param] = mean_field_parameter
else:
temp_params_dict[param] = prior_weight * value + (
1. - prior_weight) * mean_field_parameter
if isinstance(original_dist, sample.Sample):
inner_dist = type(actual_dist)(**temp_params_dict)
surrogate_dist = independent.Independent(
inner_dist,
reinterpreted_batch_ndims=ps.rank_from_shape(
original_dist.sample_shape))
else:
surrogate_dist = type(actual_dist)(**temp_params_dict)
if isinstance(original_dist,
transformed_distribution.TransformedDistribution):
surrogate_dist = transformed_distribution.TransformedDistribution(
surrogate_dist, bijector=original_dist.bijector)
if isinstance(original_dist, independent.Independent):
surrogate_dist = independent.Independent(
surrogate_dist,
reinterpreted_batch_ndims=original_dist
.reinterpreted_batch_ndims)
if isinstance(dist, Root):
value_out = yield Root(surrogate_dist)
else:
value_out = yield surrogate_dist
dist = prior_gen.send(value_out)
i += 1
except StopIteration:
pass
surrogate_posterior = (
joint_distribution_auto_batched.JointDistributionCoroutineAutoBatched(
posterior_generator))
# Ensure that the surrogate posterior structure matches that of the prior
try:
nest.assert_same_structure(prior.dtype, surrogate_posterior.dtype)
except TypeError:
tokenize = lambda jd: jd._model_unflatten( # pylint: disable=protected-access, g-long-lambda
range(len(jd._model_flatten(jd.dtype))) # pylint: disable=protected-access
)
surrogate_posterior = restructure.Restructure(
output_structure=tokenize(prior),
input_structure=tokenize(surrogate_posterior))(
surrogate_posterior)
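    # Attach the parameter dicts so their variables are tracked via
    # `surrogate_posterior.trainable_variables`.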
surrogate_posterior.also_track = param_dicts
return surrogate_posterior
def build_affine_surrogate_posterior(
event_shape,
operators='diag',
bijector=None,
base_distribution=normal.Normal,
dtype=tf.float32,
seed=None,
validate_args=False,
name=None):
"""Builds a joint variational posterior with a given `event_shape`.
This function builds a surrogate posterior by applying a trainable
transformation to a standard base distribution and constraining the samples
with `bijector`. The surrogate posterior has event shape equal to
the input `event_shape`.
This function is a convenience wrapper around
`build_affine_surrogate_posterior_from_base_distribution` that allows the
user to pass in the desired posterior `event_shape` instead of
pre-constructed base distributions (at the expense of full control over the
base distribution types and parameterizations).
Args:
event_shape: (Nested) event shape of the posterior.
operators: Either a string or a list/tuple containing `LinearOperator`
subclasses, `LinearOperator` instances, or callables returning
`LinearOperator` instances. Supported string values are "diag" (to create
a mean-field surrogate posterior) and "tril" (to create a full-covariance
surrogate posterior). A list/tuple may be passed to induce other
posterior covariance structures. If the list is flat, a
`tf.linalg.LinearOperatorBlockDiag` instance will be created and applied
to the base distribution. Otherwise the list must be singly-nested and
have a first element of length 1, second element of length 2, etc.; the
elements of the outer list are interpreted as rows of a lower-triangular
block structure, and a `tf.linalg.LinearOperatorBlockLowerTriangular`
instance is created. For complete documentation and examples, see
`tfp.experimental.vi.util.build_trainable_linear_operator_block`, which
receives the `operators` arg if it is list-like.
Default value: `"diag"`.
bijector: `tfb.Bijector` instance, or nested structure of `tfb.Bijector`
instances, that maps (nested) values in R^n to the support of the
posterior. (This can be the `experimental_default_event_space_bijector` of
the distribution over the prior latent variables.)
Default value: `None` (i.e., the posterior is over R^n).
base_distribution: A `tfd.Distribution` subclass parameterized by `loc` and
`scale`. The base distribution of the transformed surrogate has `loc=0.`
and `scale=1.`.
Default value: `tfd.Normal`.
dtype: The `dtype` of the surrogate posterior.
Default value: `tf.float32`.
seed: Python integer to seed the random number generator for initial values.
Default value: `None`.
validate_args: Python `bool`. Whether to validate input with asserts. This
imposes a runtime cost. If `validate_args` is `False`, and the inputs are
invalid, correct behavior is not guaranteed.
Default value: `False`.
name: Python `str` name prefixed to ops created by this function.
Default value: `None` (i.e., 'build_affine_surrogate_posterior').
Returns:
surrogate_distribution: Trainable `tfd.Distribution` with event shape equal
to `event_shape`.
#### Examples
```python
tfd = tfp.distributions
tfb = tfp.bijectors
# Define a joint probabilistic model.
Root = tfd.JointDistributionCoroutine.Root
def model_fn():
concentration = yield Root(tfd.Exponential(1.))
rate = yield Root(tfd.Exponential(1.))
y = yield tfd.Sample(
tfd.Gamma(concentration=concentration, rate=rate),
sample_shape=4)
model = tfd.JointDistributionCoroutine(model_fn)
# Assume the `y` are observed, such that the posterior is a joint distribution
# over `concentration` and `rate`. The posterior event shape is then equal to
# the first two components of the model's event shape.
posterior_event_shape = model.event_shape_tensor()[:-1]
# Constrain the posterior values to be positive using the `Exp` bijector.
bijector = [tfb.Exp(), tfb.Exp()]
# Build a full-covariance surrogate posterior.
surrogate_posterior = (
tfp.experimental.vi.build_affine_surrogate_posterior(
event_shape=posterior_event_shape,
operators='tril',
bijector=bijector))
# For an example defining `'operators'` as a list to express an alternative
# covariance structure, see
# `build_affine_surrogate_posterior_from_base_distribution`.
# Fit the model.
y = [0.2, 0.5, 0.3, 0.7]
target_model = model.experimental_pin(y=y)
losses = tfp.vi.fit_surrogate_posterior(
target_model.unnormalized_log_prob,
surrogate_posterior,
num_steps=100,
optimizer=tf.optimizers.Adam(0.1),
sample_size=10)
```
"""
with tf.name_scope(name or 'build_affine_surrogate_posterior'):
event_shape = nest.map_structure_up_to(
_get_event_shape_shallow_structure(event_shape),
lambda s: tf.convert_to_tensor(s, dtype=tf.int32),
event_shape)
if nest.is_nested(bijector):
bijector = joint_map.JointMap(
nest.map_structure(
lambda b: identity.Identity() if b is None else b,
bijector), validate_args=validate_args)
if bijector is None:
unconstrained_event_shape = event_shape
else:
unconstrained_event_shape = (
bijector.inverse_event_shape_tensor(event_shape))
standard_base_distribution = nest.map_structure(
lambda s: sample.Sample( # pylint: disable=g-long-lambda
base_distribution(loc=tf.zeros([], dtype=dtype), scale=1.),
sample_shape=s, validate_args=validate_args),
unconstrained_event_shape)
return build_affine_surrogate_posterior_from_base_distribution(
standard_base_distribution,
operators=operators,
bijector=bijector,
seed=seed,
validate_args=validate_args)
def build_affine_surrogate_posterior_from_base_distribution(
base_distribution,
operators='diag',
bijector=None,
initial_unconstrained_loc_fn=_sample_uniform_initial_loc,
seed=None,
validate_args=False,
name=None):
"""Builds a variational posterior by linearly transforming base distributions.
This function builds a surrogate posterior by applying a trainable
transformation to a base distribution (typically a `tfd.JointDistribution`) or
nested structure of base distributions, and constraining the samples with
`bijector`. Note that the distributions must have event shapes corresponding
to the *pretransformed* surrogate posterior -- that is, if `bijector` contains
a shape-changing bijector, then the corresponding base distribution event
shape is the inverse event shape of the bijector applied to the desired
  surrogate posterior shape. The surrogate posterior is constructed as follows:
1. Flatten the base distribution event shapes to vectors, and pack the base
distributions into a `tfd.JointDistribution`.
2. Apply a trainable blockwise LinearOperator bijector to the joint base
distribution.
3. Apply the constraining bijectors and return the resulting trainable
`tfd.TransformedDistribution` instance.
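  Schematically (an illustrative summary of the steps above), samples are
  generated as
  `constraining_bijector(unflatten_and_reshape(loc_shift(linop_scale(flat_base))))`.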
Args:
base_distribution: `tfd.Distribution` instance (typically a
`tfd.JointDistribution`), or a nested structure of `tfd.Distribution`
instances.
operators: Either a string or a list/tuple containing `LinearOperator`
subclasses, `LinearOperator` instances, or callables returning
`LinearOperator` instances. Supported string values are "diag" (to create
a mean-field surrogate posterior) and "tril" (to create a full-covariance
surrogate posterior). A list/tuple may be passed to induce other
posterior covariance structures. If the list is flat, a
`tf.linalg.LinearOperatorBlockDiag` instance will be created and applied
to the base distribution. Otherwise the list must be singly-nested and
have a first element of length 1, second element of length 2, etc.; the
elements of the outer list are interpreted as rows of a lower-triangular
block structure, and a `tf.linalg.LinearOperatorBlockLowerTriangular`
instance is created. For complete documentation and examples, see
`tfp.experimental.vi.util.build_trainable_linear_operator_block`, which
receives the `operators` arg if it is list-like.
Default value: `"diag"`.
bijector: `tfb.Bijector` instance, or nested structure of `tfb.Bijector`
instances, that maps (nested) values in R^n to the support of the
posterior. (This can be the `experimental_default_event_space_bijector` of
the distribution over the prior latent variables.)
Default value: `None` (i.e., the posterior is over R^n).
initial_unconstrained_loc_fn: Optional Python `callable` with signature
`initial_loc = initial_unconstrained_loc_fn(shape, dtype, seed)` used to
sample real-valued initializations for the unconstrained location of
each variable.
Default value: `functools.partial(tf.random.stateless_uniform,
minval=-2., maxval=2., dtype=tf.float32)`.
seed: Python integer to seed the random number generator for initial values.
Default value: `None`.
validate_args: Python `bool`. Whether to validate input with asserts. This
imposes a runtime cost. If `validate_args` is `False`, and the inputs are
invalid, correct behavior is not guaranteed.
Default value: `False`.
name: Python `str` name prefixed to ops created by this function.
Default value: `None` (i.e.,
'build_affine_surrogate_posterior_from_base_distribution').
Returns:
surrogate_distribution: Trainable `tfd.JointDistribution` instance.
Raises:
NotImplementedError: Base distributions with mixed dtypes are not supported.
#### Examples
```python
tfd = tfp.distributions
tfb = tfp.bijectors
# Fit a multivariate Normal surrogate posterior on the Eight Schools model
# [1].
treatment_effects = [28., 8., -3., 7., -1., 1., 18., 12.]
treatment_stddevs = [15., 10., 16., 11., 9., 11., 10., 18.]
def model_fn():
avg_effect = yield tfd.Normal(loc=0., scale=10., name='avg_effect')
log_stddev = yield tfd.Normal(loc=5., scale=1., name='log_stddev')
school_effects = yield tfd.Sample(
tfd.Normal(loc=avg_effect, scale=tf.exp(log_stddev)),
sample_shape=[8],
name='school_effects')
treatment_effects = yield tfd.Independent(
tfd.Normal(loc=school_effects, scale=treatment_stddevs),
reinterpreted_batch_ndims=1,
name='treatment_effects')
model = tfd.JointDistributionCoroutineAutoBatched(model_fn)
# Pin the observed values in the model.
target_model = model.experimental_pin(treatment_effects=treatment_effects)
  # Define a lower triangular structure of `LinearOperator` subclasses that
  # models full covariance among latent variables except for the 8 dimensions
  # of `school_effects`, which are modeled as independent (using
  # `LinearOperatorDiag`).
  operators = [
      [tf.linalg.LinearOperatorLowerTriangular],
      [tf.linalg.LinearOperatorFullMatrix,
       tf.linalg.LinearOperatorLowerTriangular],
      [tf.linalg.LinearOperatorFullMatrix, tf.linalg.LinearOperatorFullMatrix,
       tf.linalg.LinearOperatorDiag]]
# Constrain the posterior values to the support of the prior.
bijector = target_model.experimental_default_event_space_bijector()
  # Build the surrogate posterior. Here `base_distribution` is assumed to be
  # a structure of standard Normal distributions whose event shapes match the
  # unconstrained latent variables (it is not constructed in this snippet).
  surrogate_posterior = (
      tfp.experimental.vi.build_affine_surrogate_posterior_from_base_distribution(
          base_distribution=base_distribution,
          operators=operators,
          bijector=bijector))
# Fit the model.
losses = tfp.vi.fit_surrogate_posterior(
target_model.unnormalized_log_prob,
surrogate_posterior,
num_steps=100,
optimizer=tf.optimizers.Adam(0.1),
sample_size=10)
```
#### References
[1] Andrew Gelman, John Carlin, Hal Stern, David Dunson, Aki Vehtari, and
Donald Rubin. Bayesian Data Analysis, Third Edition.
Chapman and Hall/CRC, 2013.
"""
with tf.name_scope(
name or 'build_affine_surrogate_posterior_from_base_distribution'):
if nest.is_nested(base_distribution):
base_distribution = (
joint_distribution_util.independent_joint_distribution_from_structure(
base_distribution, validate_args=validate_args))
if nest.is_nested(bijector):
bijector = joint_map.JointMap(
nest.map_structure(
lambda b: identity.Identity() if b is None else b, bijector),
validate_args=validate_args)
event_shape = base_distribution.event_shape_tensor()
flat_event_size = nest.flatten(
nest.map_structure(ps.reduce_prod, event_shape))
base_dtypes = set(nest.flatten(base_distribution.dtype))
if len(base_dtypes) > 1:
raise NotImplementedError(
'Base distributions with mixed dtype are not supported. Saw '
'components of dtype {}'.format(base_dtypes))
base_dtype = list(base_dtypes)[0]
num_components = len(flat_event_size)
if operators == 'diag':
operators = [tf.linalg.LinearOperatorDiag] * num_components
elif operators == 'tril':
operators = [
[tf.linalg.LinearOperatorFullMatrix] * i
+ [tf.linalg.LinearOperatorLowerTriangular]
for i in range(num_components)]
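      # Illustrative structure: with `num_components == 3` the above builds
      # [[LinearOperatorLowerTriangular],
      #  [LinearOperatorFullMatrix, LinearOperatorLowerTriangular],
      #  [LinearOperatorFullMatrix, LinearOperatorFullMatrix,
      #   LinearOperatorLowerTriangular]].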
elif isinstance(operators, str):
raise ValueError(
'Unrecognized operator type {}. Valid operators are "diag", "tril", '
'or a structure that can be passed to '
'`tfp.experimental.vi.util.build_trainable_linear_operator_block` as '
'the `operators` arg.'.format(operators))
if nest.is_nested(operators):
seed, operators_seed = samplers.split_seed(seed)
operators = (
trainable_linear_operators.build_trainable_linear_operator_block(
operators,
block_dims=flat_event_size,
dtype=base_dtype,
seed=operators_seed))
linop_bijector = (
scale_matvec_linear_operator.ScaleMatvecLinearOperatorBlock(
scale=operators, validate_args=validate_args))
loc_bijector = joint_map.JointMap(
tf.nest.map_structure(
lambda s, seed: shift.Shift( # pylint: disable=g-long-lambda
tf.Variable(
initial_unconstrained_loc_fn(
[s], dtype=base_dtype, seed=seed))),
flat_event_size,
samplers.split_seed(seed, n=len(flat_event_size))),
validate_args=validate_args)
unflatten_and_reshape = chain.Chain(
[joint_map.JointMap(
nest.map_structure(reshape.Reshape, event_shape),
validate_args=validate_args),
restructure.Restructure(
nest.pack_sequence_as(event_shape, range(num_components)))],
validate_args=validate_args)
bijectors = [] if bijector is None else [bijector]
bijectors.extend(
[unflatten_and_reshape,
loc_bijector, # Allow the mean of the standard dist to shift from 0.
linop_bijector]) # Apply LinOp to scale the standard dist.
bijector = chain.Chain(bijectors, validate_args=validate_args)
flat_base_distribution = invert.Invert(
unflatten_and_reshape)(base_distribution)
return transformed_distribution.TransformedDistribution(
flat_base_distribution, bijector=bijector, validate_args=validate_args)
def build_split_flow_surrogate_posterior(
event_shape,
trainable_bijector,
constraining_bijector=None,
base_distribution=normal.Normal,
batch_shape=(),
dtype=tf.float32,
validate_args=False,
name=None):
"""Builds a joint variational posterior by splitting a normalizing flow.
Args:
event_shape: (Nested) event shape of the surrogate posterior.
trainable_bijector: A trainable `tfb.Bijector` instance that operates on
`Tensor`s (not structures), e.g. `tfb.MaskedAutoregressiveFlow` or
`tfb.RealNVP`. This bijector transforms the base distribution before it is
split.
constraining_bijector: `tfb.Bijector` instance, or nested structure of
`tfb.Bijector` instances, that maps (nested) values in R^n to the support
of the posterior. (This can be the
`experimental_default_event_space_bijector` of the distribution over the
prior latent variables.)
Default value: `None` (i.e., the posterior is over R^n).
base_distribution: A `tfd.Distribution` subclass parameterized by `loc` and
`scale`. The base distribution for the transformed surrogate has `loc=0.`
and `scale=1.`.
Default value: `tfd.Normal`.
batch_shape: The `batch_shape` of the output distribution.
Default value: `()`.
dtype: The `dtype` of the surrogate posterior.
Default value: `tf.float32`.
validate_args: Python `bool`. Whether to validate input with asserts. This
imposes a runtime cost. If `validate_args` is `False`, and the inputs are
invalid, correct behavior is not guaranteed.
Default value: `False`.
name: Python `str` name prefixed to ops created by this function.
Default value: `None` (i.e., 'build_split_flow_surrogate_posterior').
Returns:
surrogate_distribution: Trainable `tfd.TransformedDistribution` with event
shape equal to `event_shape`.
### Examples
```python
# Train a normalizing flow on the Eight Schools model [1].
treatment_effects = [28., 8., -3., 7., -1., 1., 18., 12.]
treatment_stddevs = [15., 10., 16., 11., 9., 11., 10., 18.]
model = tfd.JointDistributionNamed({
'avg_effect':
tfd.Normal(loc=0., scale=10., name='avg_effect'),
'log_stddev':
tfd.Normal(loc=5., scale=1., name='log_stddev'),
'school_effects':
lambda log_stddev, avg_effect: (
tfd.Independent(
tfd.Normal(
loc=avg_effect[..., None] * tf.ones(8),
scale=tf.exp(log_stddev[..., None]) * tf.ones(8),
name='school_effects'),
reinterpreted_batch_ndims=1)),
'treatment_effects': lambda school_effects: tfd.Independent(
tfd.Normal(loc=school_effects, scale=treatment_stddevs),
reinterpreted_batch_ndims=1)
})
# Pin the observed values in the model.
target_model = model.experimental_pin(treatment_effects=treatment_effects)
# Create a Masked Autoregressive Flow bijector.
net = tfb.AutoregressiveNetwork(2, hidden_units=[16, 16], dtype=tf.float32)
maf = tfb.MaskedAutoregressiveFlow(shift_and_log_scale_fn=net)
# Build and fit the surrogate posterior.
surrogate_posterior = (
tfp.experimental.vi.build_split_flow_surrogate_posterior(
event_shape=target_model.event_shape_tensor(),
trainable_bijector=maf,
constraining_bijector=(
target_model.experimental_default_event_space_bijector())))
losses = tfp.vi.fit_surrogate_posterior(
target_model.unnormalized_log_prob,
surrogate_posterior,
num_steps=100,
optimizer=tf.optimizers.Adam(0.1),
sample_size=10)
```
#### References
[1] Andrew Gelman, John Carlin, Hal Stern, David Dunson, Aki Vehtari, and
Donald Rubin. Bayesian Data Analysis, Third Edition.
Chapman and Hall/CRC, 2013.
"""
with tf.name_scope(name or 'build_split_flow_surrogate_posterior'):
shallow_structure = _get_event_shape_shallow_structure(event_shape)
event_shape = nest.map_structure_up_to(
shallow_structure, ps.convert_to_shape_tensor, event_shape)
if nest.is_nested(constraining_bijector):
constraining_bijector = joint_map.JointMap(
nest.map_structure(
lambda b: identity.Identity() if b is None else b,
constraining_bijector), validate_args=validate_args)
if constraining_bijector is None:
unconstrained_event_shape = event_shape
else:
unconstrained_event_shape = (
constraining_bijector.inverse_event_shape_tensor(event_shape))
flat_base_event_shape = nest.flatten(unconstrained_event_shape)
flat_base_event_size = nest.map_structure(
tf.reduce_prod, flat_base_event_shape)
event_size = tf.reduce_sum(flat_base_event_size)
base_distribution = sample.Sample(
base_distribution(tf.zeros(batch_shape, dtype=dtype), scale=1.),
[event_size])
# After transforming base distribution samples with `trainable_bijector`,
# split them into vector-valued components.
split_bijector = split.Split(
flat_base_event_size, validate_args=validate_args)
# Reshape the vectors to the correct posterior event shape.
event_reshape = joint_map.JointMap(
nest.map_structure(reshape.Reshape, unconstrained_event_shape),
validate_args=validate_args)
# Restructure the flat list of components to the correct posterior
# structure.
event_unflatten = restructure.Restructure(
nest.pack_sequence_as(
unconstrained_event_shape, range(len(flat_base_event_shape))))
bijectors = [] if constraining_bijector is None else [constraining_bijector]
bijectors.extend(
[event_reshape, event_unflatten, split_bijector, trainable_bijector])
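    # Note: `Chain` applies bijectors right-to-left, so samples flow through
    # `trainable_bijector`, then the split/unflatten/reshape steps, and
    # finally the constraining bijector.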
bijector = chain.Chain(bijectors, validate_args=validate_args)
return transformed_distribution.TransformedDistribution(
base_distribution, bijector=bijector, validate_args=validate_args)
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
8bd1a61f2f5b2c302eb15d529ee167dbbc4beabe | 91b68cd2d4e50263ad53be9bf34c28f4b893b29d | /gps_viewer/settings.py | b2a3405df04c3832a53d81c285c277eb73146e39 | [] | no_license | storrellas/gps_viewer | 6145efca4bcf7f48a96a9d08ceb916bc21cd143f | e8577933b68169193f391808488cacd7ffd5ff69 | refs/heads/master | 2020-04-19T12:05:51.128193 | 2019-02-14T07:31:45 | 2019-02-14T07:31:45 | 168,184,773 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,391 | py | """
Django settings for gps_viewer project.
Generated by 'django-admin startproject' using Django 2.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'o0%@ynj$m8ln=_-pn=)+de2$2ji5y7ks%_fuzi89n)p-3wowhe'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'corsheaders',
'api',
'ui'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
#'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
CORS_ORIGIN_ALLOW_ALL = True
ROOT_URLCONF = 'gps_viewer.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'gps_viewer.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(PROJECT_ROOT, "static")
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "ui_react/dist/")
]
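# Note (illustrative): with this layout, `python manage.py collectstatic`
# copies the built React bundle from ui_react/dist/ into STATIC_ROOT.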
| [
"storrellas@gmail.com"
] | storrellas@gmail.com |
60111fbb419ac10cb39513b5c247ebbd98e7015b | b51f277dfe339ea30dce10040eca40c20bd8a4dd | /src/config/setting.py | 7ac608787b2becd3c5908b109a5108d8a097ac85 | [
"BSD-3-Clause"
] | permissive | jack139/fair | e08b3b48391d0cb8e72bbc47e7592c030f587f48 | fe0ff64f8edbd794c3fb951ab6af420054e9e585 | refs/heads/master | 2021-06-30T15:17:15.590764 | 2020-09-23T07:14:20 | 2020-09-23T07:14:20 | 160,322,019 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,576 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import web
from pymongo import MongoClient
#####
debug_mode = True # False - production, True - staging
#####
#
enable_proxy = True
http_proxy = 'http://192.168.2.108:8888'
https_proxy = 'https://192.168.2.108:8888'
proxy_list = ['192.168.2.103']
enable_local_test = True
#####
web_serv_list={'web1' : ('192.168.2.99','192.168.2.99')} #
local_ip=web_serv_list['web1'][1]
cli = {'web' : MongoClient(web_serv_list['web1'][0]),}
# MongoClient('10.168.11.151', replicaset='rs0') # replica set
# MongoClient('10.168.11.151', replicaset='rs0', readPreference='secondaryPreferred') # 使用secondary 读
db_web = cli['web']['fair_db']
db_web.authenticate('ipcam','ipcam')
thread_num = 1
auth_user = ['test']
cs_admin = ['cs0']
tmp_path = '/usr/local/nginx/html/fair/static/tmp'
logs_path = '/usr/local/nginx/logs'
image_store_path = '/usr/local/nginx/html/fair/static/image/product'
default_shop='55837fd9ec6ef238912fab89'
B3_shop='55837fd9ec6ef238912fab89'
PT_shop={
    '001' : '564708a2ec6ef2206f57043c', # Southeast
    '002' : '', # North China
    '003' : '', # East China
}
app_host='app.urfresh.cn'
wx_host='wx.urfresh.cn'
image_host='image.urfresh.cn'
notify_host='app.urfresh.cn'
app_pool=['app.urfresh.cn']
WX_store = {
    '000' : { # test
'wx_appid' : 'wxb920ef74b6a20e69',
'wx_appsecret' : 'ddace9d14b3413c65991278f09a03896',
'mch_id' : '1242104702',
},
    '001' : { # Southeast
'wx_appid' : 'wxa84493ca70802ab5',
'wx_appsecret' : 'd4624c36b6795d1d99dcf0547af5443d',
'mch_id' : '1284728201',
},
    '002' : { # North China
'wx_appid' : 'wx64a0c20da3b0acb7',
'wx_appsecret' : 'd4624c36b6795d1d99dcf0547af5443d',
'mch_id' : '1284420901',
},
    '003' : { # East China
'wx_appid' : 'wx2527355bfd909dbe',
'wx_appsecret' : '49e8eb83c3fce102215a92047e8e9290',
'mch_id' : '1253845801',
},
}
# region_id is read from a file
f = open('/region_id')
a = f.readlines()
f.close()
region_id = a[0].strip()
# WeChat settings
wx_setting = WX_store[region_id]
order_fuffix=''
inner_number = {
'99990000100' : '9998',
'99990000101' : '3942',
'99990000102' : '4345',
'99990000103' : '2875',
'99990000104' : '3492',
'99990000105' : '0980',
'99990000106' : '3482',
'99990000107' : '5340',
'99990000108' : '9873',
'99990000109' : '2345',
'99990000110' : '8653',
}
http_port=80
https_port=443
mail_server='127.0.0.1'
sender='"Kam@Cloud"<kam@f8geek.com>'
worker=['2953116@qq.com']
web.config.debug = debug_mode
config = web.storage(
email = 'jack139@gmail.com',
site_name = 'ipcam',
site_des = '',
static = '/static'
)
| [
"gt@f8geek.com"
] | gt@f8geek.com |
90a7cddaa492df26fbac0ef47f1980e16f99b2ff | bb0eeade4685dc89ff8a53beb813afdf7394989d | /algorithm_test/saima/股神.py | e55fa66355c3369bf29859dbd5bbab3cf51b548f | [] | no_license | zhaocheng1996/pyproject | 72929cd0ba2f0486d7dc87a7defa82656bf75a8e | 0a1973dda314f844f9898357bc4a5c8ee3f2246d | refs/heads/master | 2021-10-26T08:38:43.675739 | 2019-04-11T13:52:46 | 2019-04-11T13:52:46 | 176,939,063 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 799 | py | '''
有股神吗?
有,小赛就是!
经过严密的计算,小赛买了一支股票,他知道从他买股票的那天开始,股票会有以下变化:第一天不变,以后涨一天,跌一天,涨两天,跌一天,涨三天,跌一天...依此类推。
为方便计算,假设每次涨和跌皆为1,股票初始单价也为1,请计算买股票的第n天每股股票值多少钱?
输入
输入包括多组数据;
每行输入一个n,1<=n<=10^9 。
样例输入
1
2
3
4
5
输出
请输出他每股股票多少钱,对于每组数据,输出一行。
样例输出
1
2
1
2
3
'''
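# Sanity check (illustrative, not part of the original submission): a
# brute-force simulation of the price sequence, usable to verify the
# closed-form answer below for small n.
def _simulate(n):
    price, day, up_run = 1, 1, 1  # day 1: the price stays at 1
    while day < n:
        for _ in range(up_run):  # rise for `up_run` consecutive days
            day += 1
            price += 1
            if day == n:
                return price
        day += 1  # then fall for exactly one day
        price -= 1
        if day == n:
            return price
        up_run += 1  # the next rising streak is one day longer
    return price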
while True:
    try:
        x = int(input())
    except EOFError:  # no more test cases
        break
    # n walks through the indices of the falling days (3, 6, 10, 15, ...);
    # k - 2 ends up counting the falling days on or before day x.
    k = 3
    n = 3
    while x - k >= n:
        n += k
        k += 1  # k is the number of minus signs (falling days)
    if x < 3:
        print(x)
    else:
        # Each falling day costs 2 relative to a price that rises every day.
        print(int(x - (k - 2) * 2))
"34829837+zhaocheng1996@users.noreply.github.com"
] | 34829837+zhaocheng1996@users.noreply.github.com |
8e75b7716a428394722adb3cf988320cd3c06197 | e81576012330e6a6024d14f3e241f88ca34b73cd | /python_code/vnev/Lib/site-packages/quart/wrappers/request.py | d48b39736d3f799c9c937672400f0ccf79c174b5 | [
"MIT"
] | permissive | Ureimu/weather-robot | eba6a84147755aa83c941a306bac1a7c4e95e23e | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | refs/heads/master | 2021-01-15T07:23:42.274413 | 2020-03-23T02:30:19 | 2020-03-23T02:30:19 | 242,912,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,544 | py | import asyncio
import io
from cgi import FieldStorage, parse_header
from typing import Any, AnyStr, Awaitable, Callable, Generator, List, Optional, Union
from urllib.parse import parse_qs
from werkzeug.datastructures import CombinedMultiDict, Headers, MultiDict
from .base import BaseRequestWebsocket, JSONMixin
from ..datastructures import FileStorage
SERVER_PUSH_HEADERS_TO_COPY = {
"accept",
"accept-encoding",
"accept-language",
"cache-control",
"user-agent",
}
class Body:
"""A request body container.
The request body can either be iterated over and consumed in parts
(without building up memory usage) or awaited.
.. code-block:: python
async for data in body:
...
# or simply
complete = await body
Note: It is not possible to iterate over the data and then await
it.
"""
def __init__(
self, expected_content_length: Optional[int], max_content_length: Optional[int]
) -> None:
self._data = bytearray()
self._complete: asyncio.Event = asyncio.Event()
self._has_data: asyncio.Event = asyncio.Event()
self._max_content_length = max_content_length
# Exceptions must be raised within application (not ASGI)
# calls, this is achieved by having the ASGI methods set this
# to an exception on error.
self._must_raise: Optional[Exception] = None
if (
expected_content_length is not None
and max_content_length is not None
and expected_content_length > max_content_length
):
from ..exceptions import RequestEntityTooLarge # noqa Avoiding circular import
self._must_raise = RequestEntityTooLarge()
def __aiter__(self) -> "Body":
return self
async def __anext__(self) -> bytes:
if self._must_raise is not None:
raise self._must_raise
# if we got all of the data in the first shot, then self._complete is
# set and self._has_data will not get set again, so skip the await
# if we already have completed everything
if not self._complete.is_set():
await self._has_data.wait()
if self._complete.is_set() and len(self._data) == 0:
raise StopAsyncIteration()
data = bytes(self._data)
self._data.clear()
self._has_data.clear()
return data
def __await__(self) -> Generator[Any, None, Any]:
# Must check the _must_raise before and after waiting on the
# completion event as it may change whilst waiting and the
# event may not be set if there is already an issue.
if self._must_raise is not None:
raise self._must_raise
yield from self._complete.wait().__await__()
if self._must_raise is not None:
raise self._must_raise
return bytes(self._data)
def append(self, data: bytes) -> None:
if data == b"" or self._must_raise is not None:
return
self._data.extend(data)
self._has_data.set()
if self._max_content_length is not None and len(self._data) > self._max_content_length:
from ..exceptions import RequestEntityTooLarge # noqa Avoiding circular import
self._must_raise = RequestEntityTooLarge()
self.set_complete()
def set_complete(self) -> None:
self._complete.set()
self._has_data.set()
def set_result(self, data: bytes) -> None:
"""Convienience method, mainly for testing."""
self.append(data)
self.set_complete()
class Request(BaseRequestWebsocket, JSONMixin):
"""This class represents a request.
It can be subclassed and the subclassed used in preference by
replacing the :attr:`~quart.Quart.request_class` with your
subclass.
Attributes:
body_class: The class to store the body data within.
"""
body_class = Body
def __init__(
self,
method: str,
scheme: str,
path: str,
query_string: bytes,
headers: Headers,
root_path: str,
http_version: str,
*,
max_content_length: Optional[int] = None,
body_timeout: Optional[int] = None,
send_push_promise: Callable[[str, Headers], Awaitable[None]],
) -> None:
"""Create a request object.
Arguments:
method: The HTTP verb.
scheme: The scheme used for the request.
path: The full unquoted path of the request.
query_string: The raw bytes for the query string part.
headers: The request headers.
root_path: The root path that should be prepended to all
routes.
http_version: The HTTP version of the request.
max_content_length: The maximum length in bytes of the
body (None implies no limit in Quart).
body_timeout: The maximum time (seconds) to wait for the
body before timing out.
send_push_promise: An awaitable to send a push promise based
off of this request (HTTP/2 feature).
"""
super().__init__(method, scheme, path, query_string, headers, root_path, http_version)
self.body_timeout = body_timeout
self.body = self.body_class(self.content_length, max_content_length)
self._form: Optional[MultiDict] = None
self._files: Optional[MultiDict] = None
self._send_push_promise = send_push_promise
async def get_data(self, raw: bool = True) -> AnyStr:
"""The request body data."""
try:
body_future = asyncio.ensure_future(self.body)
raw_data = await asyncio.wait_for(body_future, timeout=self.body_timeout)
except asyncio.TimeoutError:
body_future.cancel()
from ..exceptions import RequestTimeout # noqa Avoiding circular import
raise RequestTimeout()
if raw:
return raw_data
else:
return raw_data.decode(self.charset)
@property
async def data(self) -> bytes:
return await self.get_data()
@property
async def values(self) -> CombinedMultiDict:
form = await self.form
return CombinedMultiDict([self.args, form])
@property
async def form(self) -> MultiDict:
"""The parsed form encoded data.
Note file data is present in the :attr:`files`.
"""
if self._form is None:
await self._load_form_data()
return self._form
@property
async def files(self) -> MultiDict:
"""The parsed files.
This will return an empty multidict unless the request
mimetype was ``enctype="multipart/form-data"`` and the method
POST, PUT, or PATCH.
"""
if self._files is None:
await self._load_form_data()
return self._files
async def _load_form_data(self) -> None:
raw_data: bytes = await self.get_data(raw=True)
self._form = MultiDict()
self._files = MultiDict()
content_header = self.content_type
if content_header is None:
return
content_type, parameters = parse_header(content_header)
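        # e.g. 'multipart/form-data; boundary=xyz' parses to
        # ('multipart/form-data', {'boundary': 'xyz'}).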
if content_type == "application/x-www-form-urlencoded":
try:
data = raw_data.decode(parameters.get("charset", "utf-8"))
except UnicodeDecodeError:
from ..exceptions import BadRequest # noqa Avoiding circular import
raise BadRequest()
for key, values in parse_qs(data, keep_blank_values=True).items():
for value in values:
self._form.add(key, value)
elif content_type == "multipart/form-data":
field_storage = FieldStorage(
io.BytesIO(raw_data),
headers={**self.headers}, # type: ignore
environ={"REQUEST_METHOD": "POST"},
limit=len(raw_data),
)
for key in field_storage: # type: ignore
field_storage_key = field_storage[key]
if isinstance(field_storage_key, list):
for item in field_storage_key:
self._load_field_storage(key, item)
else:
self._load_field_storage(key, field_storage_key)
def _load_field_storage(self, key: str, field_storage: FieldStorage) -> None:
if isinstance(field_storage, FieldStorage) and field_storage.filename is not None:
self._files.add(
key,
FileStorage(
io.BytesIO(field_storage.file.read()),
field_storage.filename,
field_storage.name, # type: ignore
field_storage.type,
field_storage.headers, # type: ignore
),
)
else:
self._form.add(key, field_storage.value)
@property
def content_encoding(self) -> Optional[str]:
return self.headers.get("Content-Encoding")
@property
def content_length(self) -> Optional[int]:
if "Content-Length" in self.headers:
return int(self.headers["Content-Length"])
else:
return None
@property
def content_md5(self) -> Optional[str]:
return self.headers.get("Content-md5")
@property
def content_type(self) -> Optional[str]:
return self.headers.get("Content-Type")
async def _load_json_data(self) -> str:
"""Return the data after decoding."""
return await self.get_data(raw=False)
async def send_push_promise(self, path: str) -> None:
headers = Headers()
for name in SERVER_PUSH_HEADERS_TO_COPY:
for value in self.headers.getlist(name):
headers.add(name, value)
await self._send_push_promise(path, headers)
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self.method}, {self.path})"
class Websocket(BaseRequestWebsocket):
def __init__(
self,
path: str,
query_string: bytes,
scheme: str,
headers: Headers,
root_path: str,
http_version: str,
subprotocols: List[str],
receive: Callable,
send: Callable,
accept: Callable,
) -> None:
"""Create a request object.
Arguments:
path: The full unquoted path of the request.
query_string: The raw bytes for the query string part.
scheme: The scheme used for the request.
headers: The request headers.
root_path: The root path that should be prepended to all
routes.
http_version: The HTTP version of the request.
subprotocols: The subprotocols requested.
            receive: Returns an awaitable of the current data
            send: A callable to send data over the websocket.
            accept: Idempotent callable to accept the websocket connection.
"""
super().__init__("GET", scheme, path, query_string, headers, root_path, http_version)
self._accept = accept
self._receive = receive
self._send = send
self._subprotocols = subprotocols
@property
def requested_subprotocols(self) -> List[str]:
return self._subprotocols
async def receive(self) -> AnyStr:
await self.accept()
return await self._receive()
async def send(self, data: AnyStr) -> None:
# Must allow for the event loop to act if the user has say
# setup a tight loop sending data over a websocket (as in the
# example). So yield via the sleep.
await asyncio.sleep(0)
await self.accept()
await self._send(data)
async def accept(
self, headers: Optional[Union[dict, Headers]] = None, subprotocol: Optional[str] = None,
) -> None:
"""Manually chose to accept the websocket connection.
Arguments:
headers: Additional headers to send with the acceptance
response.
subprotocol: The chosen subprotocol, optional.
"""
if headers is None:
headers_ = Headers()
else:
headers_ = Headers(headers)
await self._accept(headers_, subprotocol)
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self.path})"
| [
"a1090693441@163.com"
] | a1090693441@163.com |
b231c21b792bd41705191896803d0fb8f47a8d88 | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/app/plugin/assembler/sleigh/parse/AssemblyParseActionGotoTable.pyi | e97b09c156aed97e3a672ff67ca44e03de9e423d | [
"MIT"
] | permissive | kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,427 | pyi | import ghidra.app.plugin.assembler.sleigh.grammars
import ghidra.app.plugin.assembler.sleigh.parse
import ghidra.app.plugin.assembler.sleigh.parse.AssemblyParseActionGotoTable
import ghidra.app.plugin.assembler.sleigh.symbol
import java.lang
import java.util
class AssemblyParseActionGotoTable(object):
"""
The Action/Goto table for a LALR(1) parser
This table is unconventional in that it permits a single cell to be populated by more than one
action. Typically, such a situation would indicate an ambiguity, or the need for a longer
look-ahead value. Because we do not presume to control the grammar (which was automatically
derived from another source), the parsing algorithm will simply branch, eventually trying both
options.
"""
class Action(object, java.lang.Comparable):
def __init__(self): ...
@overload
def compareTo(self, __a0: ghidra.app.plugin.assembler.sleigh.parse.AssemblyParseActionGotoTable.Action) -> int: ...
@overload
def compareTo(self, __a0: object) -> int: ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
class ReduceAction(ghidra.app.plugin.assembler.sleigh.parse.AssemblyParseActionGotoTable.Action):
def __init__(self, __a0: ghidra.app.plugin.assembler.sleigh.grammars.AssemblyProduction): ...
@overload
def compareTo(self, __a0: ghidra.app.plugin.assembler.sleigh.parse.AssemblyParseActionGotoTable.Action) -> int: ...
@overload
def compareTo(self, __a0: object) -> int: ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
class ShiftAction(ghidra.app.plugin.assembler.sleigh.parse.AssemblyParseActionGotoTable.Action):
def __init__(self, __a0: int): ...
@overload
def compareTo(self, __a0: ghidra.app.plugin.assembler.sleigh.parse.AssemblyParseActionGotoTable.Action) -> int: ...
@overload
def compareTo(self, __a0: object) -> int: ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
class AcceptAction(ghidra.app.plugin.assembler.sleigh.parse.AssemblyParseActionGotoTable.Action):
ACCEPT: ghidra.app.plugin.assembler.sleigh.parse.AssemblyParseActionGotoTable.AcceptAction = acc
def __init__(self): ...
@overload
def compareTo(self, __a0: ghidra.app.plugin.assembler.sleigh.parse.AssemblyParseActionGotoTable.Action) -> int: ...
@overload
def compareTo(self, __a0: object) -> int: ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
class GotoAction(ghidra.app.plugin.assembler.sleigh.parse.AssemblyParseActionGotoTable.Action):
def __init__(self, __a0: int): ...
@overload
def compareTo(self, __a0: ghidra.app.plugin.assembler.sleigh.parse.AssemblyParseActionGotoTable.Action) -> int: ...
@overload
def compareTo(self, __a0: object) -> int: ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
def __init__(self): ...
def equals(self, __a0: object) -> bool: ...
def get(self, fromState: int, next: ghidra.app.plugin.assembler.sleigh.symbol.AssemblySymbol) -> java.util.Collection:
"""
Get all entries in a given cell
@param fromState the state (row) in the table
@param next the symbol (column) in the table
@return all action entries in the given cell
"""
...
def getClass(self) -> java.lang.Class: ...
def getExpected(self, fromState: int) -> java.util.Collection:
"""
Get the terminals that are expected, i.e., have entries for the given state
@param fromState the state (row) in the table
@return the collection of populated columns (terminals) for the given state
"""
...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def put(self, fromState: int, next: ghidra.app.plugin.assembler.sleigh.symbol.AssemblySymbol, action: ghidra.app.plugin.assembler.sleigh.parse.AssemblyParseActionGotoTable.Action) -> bool:
"""
Add an action entry to the given cell
@param fromState the state (row) in the table
@param next the symbol (column) in the table
@param action the entry to add to the cell
@return true, if the given entry was not already present
"""
...
def putAccept(self, fromState: int) -> bool:
"""
Add an ACCEPT entry for the given state at the end of input
@param fromState the state (row) in the table
@return true, if the state does not already accept on end of input
"""
...
def putGoto(self, fromState: int, next: ghidra.app.plugin.assembler.sleigh.symbol.AssemblyNonTerminal, newState: int) -> bool:
"""
Add a GOTO entry to the given cell
@param fromState the state (row) in the table
@param next the symbol (column) in the table
@param newState the target state
@return true, if the given entry was not already present
"""
...
def putReduce(self, fromState: int, next: ghidra.app.plugin.assembler.sleigh.symbol.AssemblyTerminal, prod: ghidra.app.plugin.assembler.sleigh.grammars.AssemblyProduction) -> bool:
"""
Add a REDUCE (R<i>n</i>) entry to the given cell
@param fromState the state (row) in the table
@param next the symbol (column) in the table
@param prod the production (having index <i>n</i>) associated with the reduction
@return true, if the given entry was not already present
"""
...
def putShift(self, fromState: int, next: ghidra.app.plugin.assembler.sleigh.symbol.AssemblyTerminal, newState: int) -> bool:
"""
Add a SHIFT (S<i>n</i>) entry to the given cell
@param fromState the state (row) in the table
@param next the symbol (column) in the table
@param newState the state (<i>n</i>) after the shift is applied
@return true, if the given entry was not already present
"""
...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
| [
"tsunekou1019@gmail.com"
] | tsunekou1019@gmail.com |
b9ad225f2809d99e44ab9e566cdd0d763a4554f3 | 09f8b619e0e351b653db31ea9d65a97767e92a1c | /setup.py | 5c9af850c231d2c3cf3a7defce1882c329f39e18 | [
"BSD-2-Clause"
] | permissive | sudlab/cgat | 793e5ec8503db399ad9a741293bf3683828bf149 | 4261368393195b8dc75c3ad544d5556a27280633 | refs/heads/master | 2021-01-21T18:00:16.097868 | 2017-05-17T08:03:47 | 2017-05-17T08:03:47 | 45,106,248 | 0 | 1 | null | 2017-04-05T13:56:50 | 2015-10-28T10:53:40 | Python | UTF-8 | Python | false | false | 9,104 | py | import glob
import sys
import os
import subprocess
import re
########################################################################
#######################################################################
# Check for dependencies
#
# Is there a way to do this more elegantly?
# 1. Run "pip install numpy"
# 2. Wrap inside functions (works for numpy/pysam, but not cython)
try:
import numpy
except ImportError:
raise ImportError(
"the CGAT code collection requires numpy to be installed "
"before running setup.py (pip install numpy)")
try:
import Cython
except ImportError:
raise ImportError(
"the CGAT code collection requires cython to "
"be installed before running setup.py (pip install cython)")
try:
import pysam
except ImportError:
raise ImportError(
"the CGAT code collection requires pysam to "
"be installed before running setup.py (pip install pysam)")
########################################################################
########################################################################
# Import setuptools
# Use existing setuptools, otherwise try ez_setup.
try:
import setuptools
except ImportError:
# try to get via ez_setup
# ez_setup did not work on all machines tested as
# it uses curl with https protocol, which is not
# enabled in ScientificLinux
import ez_setup
ez_setup.use_setuptools()
from setuptools import setup, find_packages, Extension
from distutils.version import LooseVersion
if LooseVersion(setuptools.__version__) < LooseVersion('1.1'):
print("Version detected:", LooseVersion(setuptools.__version__))
raise ImportError(
"the CGAT code collection requires setuptools 1.1 higher")
from Cython.Distutils import build_ext
########################################################################
########################################################################
IS_OSX = sys.platform == 'darwin'
########################################################################
########################################################################
# collect CGAT version
sys.path.insert(0, "CGAT")
import version
version = version.__version__
###############################################################
###############################################################
# Check for external dependencies
#
# Not exhaustive, simply execute a representative tool from a toolkit.
external_dependencies = (
("wigToBigWig", "UCSC tools", 255),
("bedtools", "bedtools", 0),
)
for tool, toolkit, expected in external_dependencies:
try:
# py3k
from subprocess import DEVNULL
except ImportError:
DEVNULL = open(os.devnull, 'wb')
try:
retcode = subprocess.call(tool, shell=True,
stdout=DEVNULL, stderr=DEVNULL)
except OSError as msg:
print(("WARNING: depency check for %s failed: %s" % (toolkit, msg)))
# UCSC tools return 255 when called without arguments
if retcode != expected:
print(("WARNING: depency check for %s(%s) failed, error %i" %
(toolkit, tool, retcode)))
###############################################################
###############################################################
# Define dependencies
#
major, minor1, minor2, s, tmp = sys.version_info
if (major == 2 and minor1 < 7) or major < 2:
raise SystemExit("""CGAT requires Python 2.7 or later.""")
#####################################################################
#####################################################################
# Code to install dependencies from a repository
#####################################################################
# Modified from http://stackoverflow.com/a/9125399
#####################################################################
def which(program):
"""
Detect whether or not a program is installed.
Thanks to http://stackoverflow.com/a/377028/70191
"""
def is_exe(fpath):
return os.path.exists(fpath) and os.access(fpath, os.X_OK)
fpath, _ = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ['PATH'].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
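# e.g. which("bedtools") might return "/usr/bin/bedtools" if installed,
# otherwise None.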
REPO_REQUIREMENT = re.compile(
r'^-e (?P<link>(?P<vcs>git|svn|hg|bzr).+#egg=(?P<package>.+)-(?P<version>\d(?:\.\d)*))$')
HTTPS_REQUIREMENT = re.compile(
r'^-e (?P<link>.*).+#(?P<package>.+)-(?P<version>\d(?:\.\d)*)$')
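# Illustrative match (hypothetical line): REPO_REQUIREMENT matches
# "-e git+https://github.com/user/pkg.git#egg=pkg-1.0",
# yielding vcs="git", package="pkg" and version="1.0".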
install_requires = []
dependency_links = []
for requirement in (
l.strip() for l in open('requires.txt') if not l.startswith("#")):
match = REPO_REQUIREMENT.match(requirement)
if match:
assert which(match.group('vcs')) is not None, \
("VCS '%(vcs)s' must be installed in order to "
"install %(link)s" % match.groupdict())
install_requires.append("%(package)s==%(version)s" % match.groupdict())
dependency_links.append(match.group('link'))
continue
if requirement.startswith("https"):
install_requires.append(requirement)
continue
match = HTTPS_REQUIREMENT.match(requirement)
if match:
install_requires.append("%(package)s>=%(version)s" % match.groupdict())
dependency_links.append(match.group('link'))
continue
install_requires.append(requirement)
if major == 2:
install_requires.extend(['web.py>=0.37',
'xlwt>=0.7.4',
'matplotlib-venn>=0.5'])
elif major == 3:
pass
cgat_packages = find_packages()
cgat_package_dirs = {'CGAT': 'CGAT'}
##########################################################
##########################################################
# Classifiers
classifiers = """
Development Status :: 3 - Alpha
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: OSI Approved
Programming Language :: Python
Topic :: Software Development
Topic :: Scientific/Engineering
Operating System :: POSIX
Operating System :: Unix
Operating System :: MacOS
"""
##########################################################
# Cython Extensions
# Connected components cython extension
Components = Extension(
'CGAT.Components',
['CGAT/Components/Components.pyx',
'CGAT/Components/connected_components.cpp', ],
library_dirs=[],
libraries=[],
language="c++",
)
# Nested containment lists
NCL = Extension(
"CGAT.NCL.cnestedlist",
["CGAT/NCL/cnestedlist.pyx",
"CGAT/NCL/intervaldb.c"],
library_dirs=[],
libraries=[],
language="c",
)
# Timeseries analysis
Timeseries = Extension(
"CGAT.Timeseries.cmetrics",
["CGAT/Timeseries/cmetrics.pyx"],
include_dirs=[numpy.get_include()],
library_dirs=[],
libraries=[],
language="c",
)
# Gene model analysis
GeneModelAnalysis = Extension(
"CGAT.GeneModelAnalysis",
["CGAT/GeneModelAnalysis.pyx"],
include_dirs=pysam.get_include() + [numpy.get_include()],
library_dirs=[],
libraries=[],
define_macros=pysam.get_defines(),
language="c",
)
# automatically build pyximport script extensions
pyx_files = glob.glob("CGAT/scripts/*.pyx")
script_extensions = []
pysam_dirname = os.path.dirname(pysam.__file__)
include_dirs = [numpy.get_include()] + pysam.get_include()
if IS_OSX:
    # linking against bundles does not work (and apparently is not needed)
# within OS X
extra_link_args = []
else:
extra_link_args = [os.path.join(pysam_dirname, x) for x in
pysam.get_libraries()]
for pyx_file in pyx_files:
script_name = os.path.basename(pyx_file)
script_prefix = script_name[:-4]
script_extensions.append(
Extension("CGAT.scripts.%s" % (script_prefix),
sources=[pyx_file],
extra_link_args=extra_link_args,
include_dirs=include_dirs,
define_macros=pysam.get_defines())
)
ext_modules = [Components, NCL, Timeseries, GeneModelAnalysis] + script_extensions
setup(
# package information
name='CGAT',
version=version,
description='CGAT : the Computational Genomics Analysis Toolkit',
author='Andreas Heger',
author_email='andreas.heger@gmail.com',
license="MIT",
platforms=["any"],
keywords="computational genomics",
long_description='CGAT : the Computational Genomics Analysis Toolkit',
classifiers=[_f for _f in classifiers.split("\n") if _f],
url="http://www.cgat.org/cgat/Tools/",
# package contents
packages=cgat_packages,
package_dir=cgat_package_dirs,
include_package_data=True,
entry_points={
'console_scripts': ['cgat = CGAT.cgat:main']
},
# dependencies
install_requires=install_requires,
dependency_links=dependency_links,
# extension modules
ext_modules=ext_modules,
cmdclass={'build_ext': build_ext},
# other options
zip_safe=False,
test_suite="tests",
)
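# Typical invocations (annotation, not part of the original file):
#   python setup.py build_ext --inplace   # compile the Cython extensions in place
#   python setup.py install               # build and install the package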
| [
"andreas.heger@gmail.com"
] | andreas.heger@gmail.com |
501a8e701d08eb3184bf15a41bc714ea9b715091 | b76daa106277ef2f7ab7f6e3278546c6da0bb967 | /base/web/server.py | 25c3b1b57642b2270e4f795b12adb9c5b5a914d2 | [] | no_license | DyLanCao/ipython | d071b4659999062106438ec077d27754a711ef92 | 746e070d193de04002d277e5170ddf8b5d9d4d44 | refs/heads/master | 2021-06-12T19:31:44.325346 | 2021-02-20T03:17:58 | 2021-02-20T03:17:58 | 142,657,284 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 770 | py | #-*- coding:utf-8 -*-
import BaseHTTPServer
class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    '''Handle a request and return the page.'''
    # Page template
Page = '''\
<html>
<body>
<p>Hello, web!</p>
</body>
</html>
'''
    # Handle a single GET request
def do_GET(self):
self.send_response(200)
self.send_header("Content-Type", "text/html")
self.send_header("Content-Length", str(len(self.Page)))
self.end_headers()
self.wfile.write(self.Page)
#----------------------------------------------------------------------
if __name__ == '__main__':
serverAddress = ('', 8080)
server = BaseHTTPServer.HTTPServer(serverAddress, RequestHandler)
server.serve_forever()
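# Quick check (annotation, not part of the original file; Python 2 to match
# the BaseHTTPServer code above):
#   import urllib2
#   print urllib2.urlopen('http://localhost:8080/').read()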
| [
"caoyin2011@163.com"
] | caoyin2011@163.com |
ca44966244b953ca337e34a0e310dced35a4d891 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/venv/lib/python3.8/site-packages/pip/_vendor/pep517/wrappers.py | aab04ff348d90f3ea88259b7d8b1bdf203e500ca | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:c855382e9ed32186e6b9539363ea579e5c8667717f82b222f8994b92918df069
size 10783
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
ef5717815e789db410e78d7019e8f61a68415355 | 421fd914fb9c40fa5f8466377d7270dbf3ec1fdb | /Net/tensor/net7.py | 56820dc7e2ee57d1afae113f13bb65fbbaa0a43f | [] | no_license | jon--lee/vision-amt | 382d466647f52cb0c98a84cdb466821615d07b94 | 6a05921dbc2aa82e1aa651b935b57d470d903cbd | refs/heads/master | 2020-04-12T02:31:33.258391 | 2016-07-22T01:11:27 | 2016-07-22T01:11:27 | 50,693,639 | 1 | 1 | null | 2016-07-22T01:11:46 | 2016-01-29T21:47:10 | Python | UTF-8 | Python | false | false | 1,782 | py | import tensorflow as tf
import inputdata
import random
from tensornet import TensorNet
import time
import datetime
class NetSeven(TensorNet):
def __init__(self):
self.dir = "./net7/"
self.name = "net7"
self.channels = 3
self.x = tf.placeholder('float', shape=[None, 250, 250, self.channels])
self.y_ = tf.placeholder("float", shape=[None, 4])
self.w_conv1 = self.weight_variable([5, 5, self.channels, 5])
self.b_conv1 = self.bias_variable([5])
self.h_conv1 = tf.nn.relu(self.conv2d(self.x, self.w_conv1) + self.b_conv1)
#self.h_conv1 = self.max_pool(self.h_conv1, 4)
#self.w_conv2 = self.weight_variable([5, 5, 5, 3])
#self.b_conv2 = self.bias_variable([3])
#self.h_conv2 = tf.nn.relu(self.conv2d(self.h_conv1, self.w_conv2) + self.b_conv2)
#self.h_conv2 = self.max_pool(self.h_conv2, 4)
# print self.h_conv1.get_shape()
conv_num_nodes = self.reduce_shape(self.h_conv1.get_shape())
fc1_num_nodes = 128
self.w_fc1 = self.weight_variable([conv_num_nodes, fc1_num_nodes])
# self.w_fc1 = self.weight_variable([1000, fc1_num_nodes])
self.b_fc1 = self.bias_variable([fc1_num_nodes])
self.h_conv_flat = tf.reshape(self.h_conv1, [-1, conv_num_nodes])
self.h_fc1 = tf.nn.relu(tf.matmul(self.h_conv_flat, self.w_fc1) + self.b_fc1)
self.w_fc2 = self.weight_variable([fc1_num_nodes, 4])
self.b_fc2 = self.bias_variable([4])
self.y_out = tf.tanh(tf.matmul(self.h_fc1, self.w_fc2) + self.b_fc2)
self.loss = tf.reduce_mean(.5*tf.square(self.y_out - self.y_))
self.train_step = tf.train.MomentumOptimizer(.003, .9)
self.train = self.train_step.minimize(self.loss)
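# Shape sketch (annotation, not in the original source): the 250x250x3 input
# passes one 5x5 convolution with 5 feature maps, is flattened into a
# 128-unit ReLU layer, and a tanh layer emits the 4 control outputs, trained
# with momentum SGD on an L2 loss.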
| [
"mdlaskey@umich.edu"
] | mdlaskey@umich.edu |
dbe58b94975afcbf250caa82532fcbdb215e99ef | e23b28fc3ed196866a04af4e790c1c16b1b5183e | /django/login2/apps/login2_app/views.py | 1d07c6c74710450bdc4e95fdb4c868fc722faf28 | [] | no_license | diazmc/Python | 6f47e7fcfb8c263eb154d59a5a9b3866e2c9d6a8 | 89e3d54eeb2b0ed7dc7af24103ace6fb6e45d65e | refs/heads/master | 2021-01-20T01:18:23.954877 | 2017-08-24T10:39:19 | 2017-08-24T10:39:19 | 101,283,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,047 | py | from django.shortcuts import render, redirect
from .models import User
from django.contrib import messages
def index(request):
return render(request, 'login2_app/index.html')
def process(request):
if request.method == "POST":
res = User.objects.register(request.POST)
if res['status']:
request.session['user'] = request.POST['first_name']
return redirect('/success')
else:
for i in range(0, len(res['data'])):
messages.error(request, res['data'][i])
return redirect('/')
def login(request):
if request.method == "POST":
res = User.objects.login(request.POST)
if res['status']:
request.session['user'] = res['data'][0].first_name
return redirect('/success')
else:
messages.error(request, res['data'][0])
return redirect('/')
def logout(request):
request.session.flush()
return redirect('/')
def success(request):
return render(request, 'login2_app/success.html') | [
"mc.arthur_d@hotmail.com"
] | mc.arthur_d@hotmail.com |
d6102bc0218c764e7e2e42eaf102ab7f59933f42 | a18539697b2972a2ade5b8175c065f441962047d | /my_mini_web_ok/my_serverwork.py | 1b349b5ef0130071fdb8fa363ab8ac06cacfa637 | [
"MIT"
] | permissive | aGrass0825/mini_web_framwork | a68ecf2007e93ca7add1dd20973ef7bcc1f9501c | cff3881e65aae24e6cd7e1cd4567ffbea684ff89 | refs/heads/master | 2021-11-25T19:05:46.975097 | 2021-11-24T09:47:59 | 2021-11-24T09:47:59 | 226,657,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,639 | py | """
Task: write a web server
1. Import the socket / threading / sys modules
2. Create a socket object
3. Enable address reuse
4. Bind the port number
5. Set up listening, turning the socket from active to passive
6. Accept the browser connection (accept)
7. Receive the browser's request
8. Look up the requested resource
9. Send the response message
10. Close the connection with the browser
"""
import socket
import threading
import sys
from application import app
class HttpServer(object):
"""服务器类"""
def __init__(self):
self.tcp_server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.tcp_server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
self.tcp_server_socket.bind(("", 8080))
self.tcp_server_socket.listen(128)
def request_client(self, new_client_socket, ip_port):
print("浏览器上线:", ip_port)
request_data = new_client_socket.recv(1024)
# print(request_data)
if not request_data:
print("浏览器下线", ip_port)
new_client_socket.close()
return
response_data = app.application("./static", request_data, ip_port)
new_client_socket.send(response_data)
new_client_socket.close()
def start(self):
while True:
new_client_socket, ip_port = self.tcp_server_socket.accept()
thread_client = threading.Thread(target=self.request_client, args=(new_client_socket, ip_port))
thread_client.setDaemon(True)
thread_client.start()
if __name__ == '__main__':
"""启动"""
http_server_socket = HttpServer()
http_server_socket.start()
| [
"xwp_fullstack@163.com"
] | xwp_fullstack@163.com |
9958750cf4c7f18679fe4caf28b4ef2121d81922 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03910/s508996847.py | 9053d73951bd78510caa8fa55e5777f1f1518c70 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | n = int(input())
cnt = 0
for i in range(1, n + 1):
cnt += i
if cnt >= n:
res = i
break
remove = cnt - n
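# Why this works (annotation, not part of the original submission): res is
# the smallest k with 1 + 2 + ... + k >= n, the surplus cnt - n is at most
# res - 1, and dropping the single value equal to it (nothing when it is 0)
# leaves numbers that sum exactly to n.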
for i in range(1, res + 1):
if i != remove:
print(i) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
18b2f3f798405ef725faeca6f1b3db16e933b964 | 864755f7d733351b205e460ec54a5f6d13050037 | /devilry/devilry_admin/tests/subject/test_crinstance_subject.py | cbe74d7589cee6ba35a93bdf5ea6b1e7b6b59d04 | [] | permissive | aless80/devilry-django | 27fc14b7bb7356f5f9d168e435a84e7bb43a682a | 416c262e75170d5662542f15e2d7fecf5ab84730 | refs/heads/master | 2020-05-20T12:22:09.255393 | 2019-05-19T21:06:57 | 2019-05-19T21:06:57 | 185,568,847 | 0 | 0 | BSD-3-Clause | 2019-05-08T08:53:52 | 2019-05-08T08:53:51 | null | UTF-8 | Python | false | false | 5,053 | py | from django.conf import settings
from django.test import TestCase, RequestFactory
from model_mommy import mommy
from devilry.devilry_account.models import PermissionGroup
from devilry.devilry_admin.views.subject import crinstance_subject
class TestCrAdminInstance(TestCase):
def test_get_rolequeryset_not_admin(self):
mommy.make('core.Subject')
testuser = mommy.make(settings.AUTH_USER_MODEL)
request = RequestFactory().get('/test')
request.user = testuser
instance = crinstance_subject.CrAdminInstance(request=request)
self.assertEqual([], list(instance.get_rolequeryset()))
def test_get_rolequeryset_superuser(self):
testsubject = mommy.make('core.Subject')
testuser = mommy.make(settings.AUTH_USER_MODEL, is_superuser=True)
request = RequestFactory().get('/test')
request.user = testuser
instance = crinstance_subject.CrAdminInstance(request=request)
self.assertEqual([testsubject], list(instance.get_rolequeryset()))
def test_get_rolequeryset_admin_on_period_does_not_apply(self):
testperiod = mommy.make('core.Period')
periodpermissiongroup = mommy.make('devilry_account.PeriodPermissionGroup',
period=testperiod)
testuser = mommy.make(settings.AUTH_USER_MODEL)
mommy.make('devilry_account.PermissionGroupUser',
user=testuser, permissiongroup=periodpermissiongroup.permissiongroup)
request = RequestFactory().get('/test')
request.user = testuser
instance = crinstance_subject.CrAdminInstance(request=request)
self.assertEqual([], list(instance.get_rolequeryset()))
def test_get_rolequeryset_admin_on_subject(self):
testsubject = mommy.make('core.Subject')
subjectpermissiongroup = mommy.make('devilry_account.SubjectPermissionGroup',
subject=testsubject)
testuser = mommy.make(settings.AUTH_USER_MODEL)
mommy.make('devilry_account.PermissionGroupUser',
user=testuser, permissiongroup=subjectpermissiongroup.permissiongroup)
request = RequestFactory().get('/test')
request.user = testuser
instance = crinstance_subject.CrAdminInstance(request=request)
self.assertEqual([testsubject], list(instance.get_rolequeryset()))
def test_get_devilryrole_for_requestuser_not_admin(self):
testsubject = mommy.make('core.Subject')
testuser = mommy.make(settings.AUTH_USER_MODEL)
request = RequestFactory().get('/test')
request.user = testuser
request.cradmin_role = testsubject
instance = crinstance_subject.CrAdminInstance(request=request)
with self.assertRaises(ValueError):
instance.get_devilryrole_for_requestuser()
def test_get_devilryrole_for_requestuser_superuser(self):
testsubject = mommy.make('core.Subject')
testuser = mommy.make(settings.AUTH_USER_MODEL, is_superuser=True)
request = RequestFactory().get('/test')
request.user = testuser
request.cradmin_role = testsubject
instance = crinstance_subject.CrAdminInstance(request=request)
self.assertEqual('departmentadmin', instance.get_devilryrole_for_requestuser())
def test_get_devilryrole_for_requestuser_departmentadmin(self):
testsubject = mommy.make('core.Subject')
subjectpermissiongroup = mommy.make('devilry_account.SubjectPermissionGroup',
permissiongroup__grouptype=PermissionGroup.GROUPTYPE_DEPARTMENTADMIN,
subject=testsubject)
testuser = mommy.make(settings.AUTH_USER_MODEL)
mommy.make('devilry_account.PermissionGroupUser',
user=testuser, permissiongroup=subjectpermissiongroup.permissiongroup)
request = RequestFactory().get('/test')
request.user = testuser
request.cradmin_role = testsubject
instance = crinstance_subject.CrAdminInstance(request=request)
self.assertEqual('departmentadmin', instance.get_devilryrole_for_requestuser())
def test_get_devilryrole_for_requestuser_subjectadmin(self):
testsubject = mommy.make('core.Subject')
subjectpermissiongroup = mommy.make('devilry_account.SubjectPermissionGroup',
permissiongroup__grouptype=PermissionGroup.GROUPTYPE_SUBJECTADMIN,
subject=testsubject)
testuser = mommy.make(settings.AUTH_USER_MODEL)
mommy.make('devilry_account.PermissionGroupUser',
user=testuser, permissiongroup=subjectpermissiongroup.permissiongroup)
request = RequestFactory().get('/test')
request.user = testuser
request.cradmin_role = testsubject
instance = crinstance_subject.CrAdminInstance(request=request)
self.assertEqual('subjectadmin', instance.get_devilryrole_for_requestuser())
| [
"post@espenak.net"
] | post@espenak.net |
a92caba3e5809f7220aed29e8c066b8129f8ccae | 53c4460e8cce123276932b4ddf2fe00fdee75b65 | /format05.py | f2d85db164b628bffe9b45d81b308b9847712256 | [] | no_license | Yush1nk1m/Study_Python | 5ba8a6eeb73184ea7f1e892daae182b78d265e06 | 516f0ba6d9411453fa0d2df00314e383e3f8cabb | refs/heads/master | 2023-07-09T16:22:22.663219 | 2021-08-22T15:22:22 | 2021-08-22T15:22:22 | 398,831,708 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 316 | py | output_a = "{:f}".format(52.273)
output_b = "{:15f}".format(52.273) # 15칸 만들기
output_c = "{:+15f}".format(52.273) # 15칸에 부호 추가하기
output_d = "{:+015f}".format(52.273) # 15칸에 부호 추가하고 0으로 채우기
print(output_a)
print(output_b)
print(output_c)
print(output_d)
| [
"kys010306@sogang.ac.kr"
] | kys010306@sogang.ac.kr |
0246403fce1525adbf6c3160b1c8300f3eb2f89b | b1c2e16cff9f0dd9946c61c9504579e0254fef51 | /base/base_data_loader.py | 67d3e8029f2299ca206ffcf1a2af2043b34ead38 | [
"CC-BY-2.0"
] | permissive | JODONG2/semantic-segmentation-level2-cv-02 | 53bfc2a115e62889880ebb1812b790d1b5759c4b | ecef6844454e2339436d5c201392ee55b08781ee | refs/heads/master | 2023-08-26T08:29:01.990496 | 2021-11-08T04:00:49 | 2021-11-08T04:00:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,972 | py | import numpy as np
from torch.utils.data import DataLoader
from torch.utils.data.dataloader import default_collate
from torch.utils.data.sampler import SubsetRandomSampler
class BaseDataLoader(DataLoader):
"""
Base class for all data loaders
"""
def __init__(self, dataset, batch_size, shuffle, validation_split, num_workers, collate_fn=default_collate):
self.validation_split = validation_split
self.shuffle = shuffle
self.batch_idx = 0
self.n_samples = len(dataset)
self.sampler, self.valid_sampler = self._split_sampler(self.validation_split)
self.init_kwargs = {
"dataset": dataset,
"batch_size": batch_size,
"shuffle": self.shuffle,
"collate_fn": collate_fn,
"num_workers": num_workers,
}
super().__init__(sampler=self.sampler, **self.init_kwargs)
def _split_sampler(self, split):
if split == 0.0:
return None, None
idx_full = np.arange(self.n_samples)
np.random.seed(0)
np.random.shuffle(idx_full)
if isinstance(split, int):
assert split > 0
assert split < self.n_samples, "validation set size is configured to be larger than entire dataset."
len_valid = split
else:
len_valid = int(self.n_samples * split)
valid_idx = idx_full[0:len_valid]
train_idx = np.delete(idx_full, np.arange(0, len_valid))
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)
# turn off shuffle option which is mutually exclusive with sampler
self.shuffle = False
self.n_samples = len(train_idx)
return train_sampler, valid_sampler
def split_validation(self):
if self.valid_sampler is None:
return None
else:
return DataLoader(sampler=self.valid_sampler, **self.init_kwargs)
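# A minimal usage sketch (annotation; MyDataset is a placeholder, not part of
# this module):
#   loader = BaseDataLoader(MyDataset(), batch_size=16, shuffle=True,
#                           validation_split=0.1, num_workers=4)
#   val_loader = loader.split_validation()  # DataLoader over the held-out 10%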
| [
"hanbin@kakao.com"
] | hanbin@kakao.com |
522c2616de26f014f7db5d31603b5d1a2410d0c1 | c839961aeab22795200d9edef9ba043fe42eeb9c | /data/rscript101.py | aa96d464b2e8b9439bb1ed15d7f8701051283383 | [] | no_license | StevenLOL/kaggleScape | ad2bb1e2ed31794f1ae3c4310713ead1482ffd52 | 18bede8420ab8d2e4e7c1eaf6f63280e20cccb97 | refs/heads/master | 2020-03-17T05:12:13.459603 | 2018-05-02T19:35:55 | 2018-05-02T19:35:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,689 | py | __author__ = 'ZFTurbo: https://kaggle.com/zfturbo'
import datetime
import os
from collections import defaultdict
from operator import itemgetter
import operator
import random
import itertools
import heapq
import math
random.seed(2016)
def apk(actual, predicted, k=7):
if len(predicted) > k:
predicted = predicted[:k]
score = 0.0
num_hits = 0.0
for i, p in enumerate(predicted):
if p in actual and p not in predicted[:i]:
num_hits += 1.0
score += num_hits / (i+1.0)
if not actual:
return 0.0
return score / min(len(actual), k)
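# Worked example (annotation, not in the original script): apk counts a hit
# only the first time a product appears in the prediction list, so
#   apk([0, 3], [0, 1, 3]) == (1/1 + 2/3) / 2 ~= 0.8333
#   apk([5], [1, 2, 5])    == (1/3) / 1       ~= 0.3333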
hashes_important_indexes = list(range(2, 24))
hashes_important_indexes.remove(6) # date type (fecha_alta)
hashes_important_indexes.remove(22) # float type (renta)
all = itertools.combinations(hashes_important_indexes, 5)
hashes_indexes = random.sample(list(all), 2)
# print('Current set of hash indexes: {}'.format(hashes_indexes))
distrib = defaultdict(int)
def get_hashes(arr):
global hashes_indexes, distrib
(fecha_dato, ncodpers, ind_empleado,
pais_residencia, sexo, age,
fecha_alta, ind_nuevo, antiguedad,
indrel, ult_fec_cli_1t, indrel_1mes,
tiprel_1mes, indresi, indext,
conyuemp, canal_entrada, indfall,
tipodom, cod_prov, nomprov,
ind_actividad_cliente, renta, segmento) = arr[:24]
renta_slice = [45542.97, 57629.67, 68211.78, 78852.39, 90461.97,
103855.23, 120063.0, 141347.49, 173418.36, 234687.12, 28894396.51]
renta1 = -1
if renta != '' and renta != 'NA':
flrenta = float(renta)
for i in range(0, len(renta_slice)):
if flrenta < renta_slice[i]:
renta1 = i
break
distrib[renta1] += 1
sub = []
if 1:
sub = [
(1, pais_residencia, sexo, age, ind_nuevo, segmento, ind_empleado, ind_actividad_cliente, indresi, renta1),
# (2, segmento, nomprov),
(3, ncodpers),
(4, ind_empleado,ind_actividad_cliente,ind_nuevo,canal_entrada),
(5, pais_residencia, sexo, renta1, age, segmento),
(6, pais_residencia, sexo, antiguedad, segmento, ind_empleado),
]
else:
# Random set
sub = [itemgetter(*h)(arr) for h in hashes_indexes]
return sub
def date_is_important(date, d_type):
possible_dates = ['2015-01-28', '2015-02-28', '2015-03-28', '2015-04-28', '2015-05-28',
'2015-06-28', '2015-07-28', '2015-08-28', '2015-09-28', '2015-10-28',
'2015-11-28', '2015-12-28', '2016-01-28', '2016-02-28', '2016-03-28',
'2016-04-28', '2016-05-28']
koeff = 0
if d_type == 'valid':
if date == '2015-05-28':
koeff = 10
score = 1 + koeff*(possible_dates.index(date)-1)
else:
if date == '2015-06-28':
koeff = 10
score = 1 + koeff*possible_dates.index(date)
return score
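# Example weights (annotation): for d_type='train' the target month
# '2015-06-28' scores 1 + 10*5 = 51 and every other month scores 1; for
# d_type='valid' the same holds for '2015-05-28' (1 + 10*3 = 31).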
def add_data_to_main_arrays(arr, best, overallbest, customer, d_type):
date = arr[0]
ncodpers = arr[1]
hashes = get_hashes(arr)
importance = date_is_important(date, d_type)
part = arr[24:]
num_prod = 0
for i in range(24):
if part[i] == '1':
num_prod +=1
num_prod = num_prod % 3
for i in range(24):
if part[i] == '1':
if ncodpers in customer:
if customer[ncodpers][i] == '0':
for h in hashes:
best[h][i] += (importance+num_prod)
overallbest[i] += (importance+num_prod)
else:
for h in hashes:
best[h][i] += (importance+num_prod)
overallbest[i] += (importance+num_prod)
customer[ncodpers] = part
def sort_main_arrays(best, overallbest):
out = dict()
for b in best:
arr = best[b]
srtd = sorted(arr.items(), key=operator.itemgetter(1), reverse=True)
out[b] = srtd
best = out
overallbest = sorted(overallbest.items(), key=operator.itemgetter(1), reverse=True)
return best, overallbest
def get_next_best_prediction(best, hashes, predicted, cst):
score = [0] * 24
for h in hashes:
if h in best:
for i in range(len(best[h])):
sc = 24-i + len(h)
index = best[h][i][0]
if cst is not None:
if cst[index] == '1':
continue
if index not in predicted:
score[index] += sc
final = []
pred = heapq.nlargest(24, range(len(score)), score.__getitem__)
for i in range(len(pred)):
if score[pred[i]] > 0:
final.append(pred[i])
if len(final) >= 7:
break
return final
def get_predictions(arr1, best, overallbest, customer):
predicted = []
hashes = get_hashes(arr1)
ncodpers = arr1[1]
customer_data = None
if ncodpers in customer:
customer_data = customer[ncodpers]
predicted = get_next_best_prediction(best, hashes, predicted, customer_data)
# overall
if len(predicted) < 7:
for a in overallbest:
# If user is not new
if ncodpers in customer:
if customer[ncodpers][a[0]] == '1':
continue
if a[0] not in predicted:
predicted.append(a[0])
if len(predicted) == 7:
break
return predicted
def get_real_values(arr1, customer):
real = []
ncodpers = arr1[1]
arr2 = arr1[24:]
for i in range(len(arr2)):
if arr2[i] == '1':
if ncodpers in customer:
if customer[ncodpers][i] == '0':
real.append(i)
else:
real.append(i)
return real
def run_solution():
print('Preparing arrays...')
f = open("../input/train_ver2.csv", "r")
first_line = f.readline().strip()
first_line = first_line.replace("\"", "")
map_names = first_line.split(",")[24:]
# Normal variables
customer = dict()
best = defaultdict(lambda: defaultdict(int))
overallbest = defaultdict(int)
# Validation variables
customer_valid = dict()
best_valid = defaultdict(lambda: defaultdict(int))
overallbest_valid = defaultdict(int)
valid_part = []
# Calc counts
total = 0
while 1:
line = f.readline()[:-1]
total += 1
if line == '':
break
tmp1 = line.split("\"")
arr = tmp1[0][:-1].split(",") + [tmp1[1]] + tmp1[2][1:].split(',')
arr = [a.strip() for a in arr]
# Normal part
add_data_to_main_arrays(arr, best, overallbest, customer, 'train')
# Valid part
if arr[0] != '2016-05-28':
add_data_to_main_arrays(arr, best_valid, overallbest_valid, customer_valid, 'valid')
else:
valid_part.append(arr)
if total % 1000000 == 0:
print('Process {} lines ...'.format(total))
# break
f.close()
print(distrib)
print('Sort best arrays...')
print('Hashes num: ', len(best))
print('Valid part: ', len(valid_part))
# Normal
best, overallbest = sort_main_arrays(best, overallbest)
# print(best)
# Valid
best_valid, overallbest_valid = sort_main_arrays(best_valid, overallbest_valid)
map7 = 0.0
print('Validation...')
for arr1 in valid_part:
predicted = get_predictions(arr1, best_valid, overallbest_valid, customer_valid)
real = get_real_values(arr1, customer_valid)
score = apk(real, predicted)
map7 += score
if len(valid_part) > 0:
map7 /= len(valid_part)
print('Predicted score: {}'.format(map7))
print('Generate submission...')
sub_file = os.path.join('submission_' + str(round(map7, 8)) + '.csv')
out = open(sub_file, "w")
f = open("../input/test_ver2.csv", "r")
f.readline()
total = 0
out.write("ncodpers,added_products\n")
while 1:
line = f.readline()[:-1]
total += 1
if line == '':
break
tmp1 = line.split("\"")
arr = tmp1[0][:-1].split(",") + [tmp1[1]] + tmp1[2][1:].split(',')
arr = [a.strip() for a in arr]
ncodpers = arr[1]
out.write(ncodpers + ',')
predicted = get_predictions(arr, best, overallbest, customer)
for p in predicted:
out.write(map_names[p] + ' ')
if total % 1000000 == 0:
print('Read {} lines ...'.format(total))
# break
out.write("\n")
print('Total cases:', str(total))
out.close()
f.close()
if __name__ == "__main__":
run_solution()
| [
"adithyagirish@berkeley.edu"
] | adithyagirish@berkeley.edu |
01d2ddee178d1defa696239f13487f9839fe1e8e | 6915b959a36273b1e4cd3d99faa0df8058b35726 | /Python/good_populate_sylvan_readyfiles_custommonth.py | 4838e0c9468d8367fbc7d15193cf324827d563b4 | [] | no_license | rtstock/WebScrapeStockMarketIndexes | 77c0d444a4294caac977629740f9f31a2c1e541f | e13f1c81095b9d691b07dcd5784ac8b40ab92f8b | refs/heads/master | 2020-12-30T09:26:04.002635 | 2017-08-15T18:57:16 | 2017-08-15T18:57:16 | 100,405,226 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,377 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Jun 26 11:38:23 2015
@author: Justin.Malinchak
"""
import shutil
import datetime
import ntpath
import os
class perform:
    # these helpers are invoked as instance methods (self.copyFile, ...)
    # below, so they need an explicit self parameter
    def copyFile(self, src, dest):
try:
shutil.copy(src, dest)
# eg. src and dest are the same file
except shutil.Error as e:
print('Error: %s' % e)
# eg. source or destination doesn't exist
except IOError as e:
print('Error: %s' % e.strerror)
    def get_sheet_by_name(self, book, name):
"""Get a sheet by name from xlwt.Workbook, a strangely missing method.
Returns None if no sheet with the given name is present.
"""
# Note, we have to use exceptions for flow control because the
# xlwt API is broken and gives us no other choice.
import itertools
try:
for idx in itertools.count():
sheet = book.get_sheet(idx)
if sheet.name == name:
return sheet
except IndexError:
return None
    def last_day_of_month(self, date):
if date.month == 12:
return date.replace(day=31)
return date.replace(month=date.month+1, day=1) - datetime.timedelta(days=1)
    def path_leaf(self, path):
head, tail = ntpath.split(path)
return tail or ntpath.basename(head)
    def assure_path_exists(self, path):
dir = os.path.dirname(path)
if not os.path.exists(dir):
os.makedirs(dir)
def __init__(self,
custommonthdate7
):
self._setup(
custommonthdate7
)
def _setup(self
, custommonthdate7
):
print '++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++'
print 'start'
# =============
# My Parameters
import config
template_inputfilename = config.sylvanready_template_inputfilename
destination_path = config.sylvanready_destination_path
#template_inputfilename = 'C:\\Batches\\Temp\\ssc excel\\%Index returns YYYY-MM inputs.xlsx'
#destination_path = 'P:\Apl\APL Benchmarks\Data\YYYY\MMM'
# =============
print template_inputfilename
listsp = [
('Barclays (muni)','Barclays (muni) Municipal Bond: 1 Year (1-2) (USDU)','LBM01')
,('Barclays (muni)','Barclays (muni) Municipal Bond (USDU)','LBMUNI')
,('Barclays (muni)','Barclays (muni) Municipal Bond: 3 Year (2-4) (USDU)','LBM03')
,('Barclays (muni)','Barclays (muni) Municipal Bond: 5 Year (4-6) (USDU)','LBM05B')
,('Barclays (muni)','Barclays (muni) Municipal Bond: 10 Year (8-12) (USDU)','SLMU')
,('Barclays (muni)','Barclays (muni) Municipal Bond: Long Bond (22+) (USDU)','LBLONG')
,('Barclays (muni)','Barclays (muni) 1-10 Yr Blend (1-12) (USDU)','LBBLND')
,('Barclays (muni)','Barclays (muni) Municipal Bond: 7 Year (6-8) (USDU)','LBM07')
#Municipal Bond: 7 Year (6-8) (USDU)
,('Barclays (agg)','Barclays (agg) U.S. Aggregate','SHLAGG')
,('Barclays (agg)','Barclays (agg) U.S. Aggregate Intermediate','LIAGBI')
,('Barclays (agg)','Barclays (agg) U.S. Gov/Credit','LEHMAN')
,('Barclays (agg)','Barclays (agg) U.S. Gov/Credit Intermediate','SHLGCI')
,('Barclays (agg)','Barclays (agg) U.S. Government 1-3 Yr','LBG13')
,('Barclays (agg)','Barclays (agg) U.S. Treasury','LBGTSY')
#U.S. Government
,('Barclays (agg)','Barclays (agg) U.S. Government Intermediate','LBGINT')
,('Barclays (agg)','Barclays (agg) U.S. Treasury Intermediate','LBTII')
,('Barclays (agg)','Barclays (agg) U.S. Credit Intermediate','LBICB')
,('Barclays (agg)','Barclays (agg) U.S. Agency','LBUSAB')
,('Barclays (aggr)','Barclays (aggr) U.S. Treasury: U.S. TIPS (USDU)','LBTIPS')
,('Barclays (gcs)','Barclays (gcs) Treasury Trsy 1-3 Year','LGT1-3')
,('Barclays (gcs)','Barclays (gcs) Treasury Treasury Bills','LTBILL')
,('Barclays (hyd)','Barclays (hyd) U.S. Corporate High Y','BCHIYD')
,('Barclays (hyd)','Barclays (hyd) U.S. Corporate High Y Intermediate','LBIHY')
,('Barclays (hyd)','Barclays (hyd) Ba','LEHHIY')
,('Barclays (belw)','Barclays (belw) U.S. Treasury Bellwethers: 2 Year (USDU)','L2YTBW')
,('MSCI','MSCI DM Gross WORLD','MSCIWD')
,('MSCI','MSCI DM Gross WORLD ex USA','MSCIXU')
,('MSCI','MSCI AC Gross ACWI','MSCIAC')
,('MSCI','MSCI AC Gross ACWI ex USA','MSCIAW')
,('MSCI','MSCI EM Gross EM (EMERGING MARKETS)','MSCIEM')
,('MSCI','MSCI CM Gross GOLDEN DRAGON','MSCIGD')
,('MSCI','MSCI DM Net EAFE','EAFENT')
,('MSCI','MSCI DM Net WORLD','MSCIWN')
,('MSCI','MSCI DM Net WORLD ex USA','MSCIXN')
,('MSCI','MSCI AC Net ACWI','MSCIAN')
,('MSCI','MSCI AC Net ACWI ex USA','MSCIAX')
,('MSCI','MSCI DM Gross Value EAFE Value','EAFEV')
,('MSCI','MSCI DM Net Value EAFE Value','EAFEVN')
,('NAREIT','NAREIT Equity REITs','NAREIT')
,('Wilshire','Wilshire REIT','WIREIT')
,('HFRX','HFRX Global Hedge Fund Index','HFRXGH')
,('HFRX','HFRX Equity Hedge Index','HFRXGI')
]
#MSCI AC Gross ACWI ex USA
import adodbapi
import pandas.io.sql as psql
#conn = adodbapi.connect("Provider=SQLNCLI10;Server=IPC-VSQL01;Initial Catalog=DataAgg;Trusted_Connection=yes;")
conn = adodbapi.connect(config.connectstring_for_dataagg)
# ------------------------------------
# Get the max period from the database
import os.path
maxperiod = ''
query = "Select Max(Period) MaxPeriod from ProductValues where Period = '"+custommonthdate7+"';"
df = psql.read_sql(query, conn)
firstcell = df.iloc[0,0]
print 'first cell is',df.iloc[0,0]
if firstcell == None:
print 'no data for month ' + custommonthdate7
else:
maxperiod = df.iloc[0]['MaxPeriod']
FoundYear = int(maxperiod[:4])
FoundMonth= int(maxperiod[-2:])
lastdayofmaxperiod = self.last_day_of_month(datetime.date(FoundYear, FoundMonth, 1))
lastdayofmaxperiod_formatted = lastdayofmaxperiod.strftime('%m/%d/%Y')
print 'lastdayofmaxperiod_formatted',lastdayofmaxperiod_formatted
year_asstring = lastdayofmaxperiod.strftime('%Y')
shortmonthname = lastdayofmaxperiod.strftime("%B") # 'dec'
resolved_destination_path = destination_path.replace('YYYY',year_asstring).replace('MMM',shortmonthname)
print 'resolved_destination_path',resolved_destination_path
# ---------------------
# Get the return values
dict_returns = {}
if len(maxperiod) > 0:
for item in listsp:
query = "Select top 1 * from ProductValues where Measure = 'Returns Monthly' and SourceName = '"+item[0]+"' and ProductName = '"+item[1]+"' and Period = '"+maxperiod+"';"
print query
df = psql.read_sql(query, conn)
#print df
if len(df.index) > 0:
thenumber = df.iloc[0]['DataValue']
print item[0], '------',item[2],'------',item[1],'=',thenumber
dict_returns[item[2]] = thenumber
# --------------------
# Close the connection
        conn.close()
# ------------------------------------------------
# Create the Excel file from template if necessary
if len(maxperiod) > 0:
print 'maxperiod:',maxperiod
inputfilename = template_inputfilename.replace('YYYY-MM',maxperiod).replace('%','')
month_asminimalcharacters = str(int(lastdayofmaxperiod.strftime("%m")))
year_as2character = (lastdayofmaxperiod.strftime("%Y"))[-2:]
resolved_destination_pathfile = resolved_destination_path + '\\' + self.path_leaf(template_inputfilename).replace('YYYY-MM',month_asminimalcharacters+'-'+year_as2character).replace('%','').replace('inputs','inputs (automated)')
if not os.path.isfile(inputfilename):
print 'file',inputfilename, 'does not exist'
self.copyFile(template_inputfilename,inputfilename)
if not os.path.isfile(inputfilename):
print 'tried to copy',template_inputfilename,'to make the file, but process failed.'
else:
print 'ok ok, your file now exists.'
# ---------------------------------------
# Now open the excel file and populate it
# ------------------------------------------------------------------------------------
# Couldn't find much from pandas on how to do it, you may want to look into this later
# import pandas as pd
# xl = pd.ExcelFile(inputfilename)
#
# for sn in xl.sheet_names:
# print sn
# print sn.cells(1,18)
# xl.close
# -----------------------
# This seems to work fine
if os.path.isfile(inputfilename):
# ------------------------------------
# Get the data into a dictionary first
dict_ref = {}
from xlrd import open_workbook
book = open_workbook(inputfilename,on_demand=True)
sheet = book.sheet_by_name('bench')
for k,v in dict_returns.items():
# Attempt to find a matching row (search the first column for 'john')
rowIndex = -1
for cell in sheet.col(1): #
rowIndex = rowIndex + 1
if k == cell.value:
print 'found it:',k,v,sheet.cell(rowIndex,3).value,rowIndex+1
dict_ref[k] = [rowIndex+1,v]
# --------------------------------
# Then populate the worksheet here
from win32com.client import Dispatch
xlApp = Dispatch("Excel.Application")
xlApp.Visible = 1
xlApp.Workbooks.Open(inputfilename)
for k,v in dict_ref.items():
print k,v
xlApp.ActiveWorkbook.ActiveSheet.Cells(v[0],4).Value = v[1]
xlApp.ActiveWorkbook.ActiveSheet.cells(v[0],3).Value = lastdayofmaxperiod_formatted
xlApp.ActiveWorkbook.Close(SaveChanges=1) # see note 1
xlApp.Quit()
xlApp.Visible = 0 # see note 2
del xlApp
self.assure_path_exists(resolved_destination_pathfile)
self.copyFile(inputfilename,resolved_destination_pathfile)
print 'You can find your final file here:',resolved_destination_pathfile
## =====================================
#print 'my test here'
#print '----------'
#query = "Select * from ProductValues where SourceName = 'MSCI' and ProductName like '%USA%' and Period = '2015-05';"
#df = psql.read_sql(query, conn)
#print df
## =====================================
if __name__=='__main__':
o = perform('2015-08')
| [
"noreply@github.com"
] | rtstock.noreply@github.com |
e42f7d87fcb7c317bd746f39df25e50c7f71906b | a5884eb2aed17c25a710370654f28b0b70a4441f | /config.py | 9cbdcb715ad5bbf103039f0b43bbf1a9aaa12f57 | [
"Apache-2.0"
] | permissive | craigderington/celery-example-1 | 9ba2c6c94f422b33f6a73e74109cc20bba47cb3d | 6fdae655e512f96eeb9dbb109c647ae56f357bc9 | refs/heads/master | 2020-07-10T15:30:07.447621 | 2019-09-05T18:03:57 | 2019-09-05T18:03:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 605 | py | import os
# Debug
DEBUG=True
# Celery Settings
CELERY_BROKER_URL = "pyamqp://rabbitmq:5672/"
CELERY_RESULT_BACKEND = "redis://redis:6379/0"
TASK_SERIALIZER = "json"
RESULT_SERIALIZER = "json"
ACCEPT_CONTENT = ["json"]
# Timezone
TIMEZONE = "America/New_York"
ENABLE_UTC = True
# APIs
IPINFO_API_URL = "http://ip-api.com/json/"
NEUTRINO_API_URL = "https://neutrinoapi.com/"
# Environment Settings
# NEUTRINO_API_KEY = os.environ.get("NEUTRINO_API_KEY")
# NEUTRINO_API_USERNAME = os.environ.get("NEUTRINO_API_USERNAME")
# BIGFIX_USER = os.environ.get("BF_USER")
# BIGFIX_PWD = os.environ.get("BF_PWD")
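# Typical wiring (annotation; the module and app names are assumptions):
#   from celery import Celery
#   app = Celery("tasks")
#   app.config_from_object("config")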
| [
"craig@craigderington.me"
] | craig@craigderington.me |
f9edeab19833d57cf4c3106a6ba45a14f730ac46 | 2b8eedab36c6fbcec3c8d19e882e84aaffa9ef7f | /pure_pursuit_navigation/scripts/mobile_robot_pure_pursuit.py | e3b4c1a8b1eabc9e1a139b41d36b8e8792398c20 | [] | no_license | ROSDevoloper/Atlas80Gen1-AMR | ad29c1a46f36bc54dc223b0982b643f6b42c9f54 | ef2b1845b1d77321f4a009f1f32ecdb690cd618e | refs/heads/master | 2022-11-15T22:11:54.161847 | 2020-07-16T09:27:53 | 2020-07-16T09:27:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,372 | py | #!/usr/bin/env python
import rospy
import numpy as np
import time
import utils
from std_msgs.msg import Bool
from geometry_msgs.msg import PolygonStamped, Twist
from visualization_msgs.msg import Marker
from nav_msgs.msg import Odometry
class PurePursuit(object):
""" Implements Pure Pursuit trajectory tracking with a fixed lookahead and speed.
Set point determined with the method described here:
http://www.ri.cmu.edu/pub_files/pub4/howard_thomas_2006_1/howard_thomas_2006_1.pdf
Relies on localization for ground truth vehicle position. """
def __init__(self):
self.trajectory_topic = rospy.get_param("~trajectory_topic")
self.odom_topic = rospy.get_param("~odom_topic")
self.check_drive_topic = rospy.get_param("~check_drive_topic")
self.drive_topic = rospy.get_param("~drive_topic")
self.lookahead = rospy.get_param("~lookahead")
self.max_reacquire = rospy.get_param("~max_reacquire")
self.controller_freq = float(rospy.get_param("~controller_freq", "10.0"))
self.wrap = bool(rospy.get_param("~wrap"))
wheelbase_length = float(rospy.get_param("~wheelbase"))
self.KV = float(rospy.get_param("~KV"))
self.KW = float(rospy.get_param("~KW"))
self.trajectory = utils.LineTrajectory("/followed_trajectory")
self.model = utils.AckermannModel(wheelbase_length)
self.do_viz = True
self.odom_timer = utils.Timer(10)
self.iters = 0
self.d_t = float(1/self.controller_freq)
self.check_drive = True
self.nearest_point = None
self.lookahead_point = None
# set up the visualization topic to show the nearest point on the trajectory, and the lookahead point
self.viz_name = "/pure_pursuit"
self.nearest_point_pub = rospy.Publisher(self.viz_name + "/nearest_point", Marker, queue_size = 1)
self.lookahead_point_pub = rospy.Publisher(self.viz_name + "/lookahead_point", Marker, queue_size = 1)
# topic to send drive commands to
        self.control_pub = rospy.Publisher(self.drive_topic, Twist, queue_size=1)
# topic to listen for trajectories
self.traj_sub = rospy.Subscriber(self.trajectory_topic, PolygonStamped, self.trajectory_callback, queue_size=1)
# topic to listen for odometry messages, either from particle filter or robot localization
self.odom_sub = rospy.Subscriber(self.odom_topic, Odometry, self.odom_callback, queue_size=1)
# topic to listen whether drive the vehicle or not
self.check_drive_sub = rospy.Subscriber(self.check_drive_topic, Bool, self.check_driveCB, queue_size=1)
print "Initialized. Waiting on messages..."
def visualize(self):
''' Publishes visualization topics:
- Circle to indicate the nearest point along the trajectory
- Circle to indicate the chosen lookahead point '''
if not self.do_viz:
return
# visualize: pure pursuit circle, lookahead intersection, lookahead radius line, nearest point
if self.nearest_point_pub.get_num_connections() > 0 and isinstance(self.nearest_point, np.ndarray):
self.nearest_point_pub.publish(utils.make_circle_marker(
self.nearest_point, 0.5, [0.0,0.0,1.0], "/map", self.viz_name, 0, 3))
if self.lookahead_point_pub.get_num_connections() > 0 and isinstance(self.lookahead_point, np.ndarray):
self.lookahead_point_pub.publish(utils.make_circle_marker(
self.lookahead_point, 0.5, [1.0,1.0,1.0], "/map", self.viz_name, 1, 3))
def check_driveCB(self, msg):
if(msg.data == True):
self.check_drive = True
elif(msg.data == False):
self.check_drive = False
def trajectory_callback(self, msg):
''' Clears the currently followed trajectory, and loads the new one from the
message '''
print "Receiving new trajectory:", len(msg.polygon.points), "points"
self.trajectory.clear()
self.trajectory.fromPolygon(msg.polygon)
self.trajectory.publish_viz(duration=0.0)
def odom_callback(self, msg):
''' Extracts robot state information from the message, and executes pure pursuit
control. '''
pose = np.array([msg.pose.pose.position.x, msg.pose.pose.position.y, utils.quaternion_to_angle(msg.pose.pose.orientation)])
self.pure_pursuit(pose)
# this is for timing info
self.odom_timer.tick()
self.iters += 1
if self.iters % 20 == 0:
print "Control fps:", self.odom_timer.fps()
def pure_pursuit(self, pose):
''' Determines and applies Pure Pursuit control law
1. Find the nearest point on the trajectory
2. Traverse the trajectory looking for the nearest point that is the
lookahead distance away from the car, and further along the path than
the nearest point from step (1). This is the lookahead point.
3. Determine steering angle necessary to travel to the lookahead point from
step (2)
4. Send the desired speed and steering angle commands to the robot
Special cases:
- If nearest_point is beyond the max path reacquisition distance, stop
- If nearest_point is between max reacquisition dist and lookahead dist,
navigate to nearest_point
- If nearest_point is less than the lookahead distance, find the
lookahead point as normal '''
# stop if no trajectory has been received
if self.trajectory.empty():
return self.stop()
# this instructs the trajectory to convert the list of waypoints into a numpy matrix
if self.trajectory.dirty():
self.trajectory.make_np_array()
# step 1
nearest_point, nearest_dist, t, i = utils.nearest_point_on_trajectory(pose[:2], self.trajectory.np_points)
self.nearest_point = nearest_point
if nearest_dist < self.lookahead:
# step 2
lookahead_point, i2, t2 = \
utils.first_point_on_trajectory_intersecting_circle(pose[:2], self.lookahead, self.trajectory.np_points, i+t, wrap=self.wrap)
if i2 == None:
if self.iters % 5 == 0:
print "Could not find intersection, end of path?"
self.lookahead_point = None
else:
if self.iters % 5 == 0:
print "found lookahead point"
self.lookahead_point = lookahead_point
elif nearest_dist < self.max_reacquire:
if self.iters % 5 == 0:
print "Reacquiring trajectory"
self.lookahead_point = self.nearest_point
else:
self.lookahead_point = None
        # stop if there is no navigation target, otherwise use mobile robot geometry to navigate there
if not isinstance(self.lookahead_point, np.ndarray):
self.stop()
else:
if(self.check_drive == True):
xspeed, rotspeed = self.determine_speeds(pose, self.lookahead_point)
# send the control commands
self.apply_control(xspeed, rotspeed)
else:
self.stop()
self.visualize()
def determine_speeds(self, pose, lookahead_point):
'''Given a robot pose, and a lookahead point, determine the open loop control
necessary to navigate to that lookahead point. Use Differential Robot steering
geometry. '''
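        # Geometry note (annotation, not in the original source): w_v below
        # is the pure pursuit arc curvature 2*y_local/L^2 expressed through
        # map-frame deltas, theta_d is the heading at the end of that arc,
        # and the speeds are scaled so the arc is covered in one controller
        # period d_t.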
# get the lookahead point in the coordinate frame of the car
delta_x = lookahead_point[0] - pose[0]
delta_y = lookahead_point[1] - pose[1]
w_v = 2*(delta_y*np.cos(pose[2]) - delta_x*np.sin(pose[2]))/(delta_x*delta_x+delta_y*delta_y)
theta_d = np.arctan2((w_v*delta_x + np.sin(pose[2])), (-w_v*delta_y + np.cos(pose[2])))
delta_theta = theta_d - pose[2]
# use the differential robot model
if delta_theta >= np.pi:
delta_theta = delta_theta - 2*np.pi
elif delta_theta <= -np.pi:
delta_theta = delta_theta + 2*np.pi
else:
delta_theta = delta_theta
rot_speed = delta_theta/self.d_t
a = np.sin(theta_d) - np.sin(pose[2])
b = np.cos(pose[2]) - np.cos(theta_d)
x_speed = delta_theta*(delta_x*a/self.d_t + delta_y*b/self.d_t)/(a*a+b*b)
rot_speed = self.KW*rot_speed
x_speed = self.KV*x_speed
return x_speed, rot_speed
def apply_control(self, linear_speed, rotation_speed):
drive_msg = Twist()
drive_msg.linear.x = linear_speed
drive_msg.linear.y = 0
drive_msg.linear.z = 0
drive_msg.angular.x = 0
drive_msg.angular.y = 0
drive_msg.angular.z = rotation_speed
self.control_pub.publish(drive_msg)
def stop(self):
print "Stopping"
drive_msg = Twist()
drive_msg.linear.x = 0
drive_msg.linear.y = 0
drive_msg.linear.z = 0
drive_msg.angular.x = 0
drive_msg.angular.y = 0
drive_msg.angular.z = 0
self.control_pub.publish(drive_msg)
if __name__=="__main__":
rospy.init_node("pure_pursuit")
pf = PurePursuit()
rospy.spin()
| [
"kienho91@gmail.com"
] | kienho91@gmail.com |
12c222037cccef2366cde89f0c450753c7f9765a | d7218d554c9d89369c0677256802ea1eb5995dd0 | /repos/cookiecutter/tasks/config.py | 7c3e457a15e8992724681556d9c943f1c27fe8a1 | [
"MIT"
] | permissive | salotz/jubeo | bf6f76d64e8774b4f1f51a8ddbaeb345d2c3676e | 8b1d715af708a7c933d5c9459c3e2ddb7d40d741 | refs/heads/master | 2022-12-10T17:38:45.957322 | 2021-02-20T02:08:31 | 2021-02-20T02:08:31 | 246,981,335 | 1 | 0 | MIT | 2022-12-08T09:51:30 | 2020-03-13T03:34:22 | Python | UTF-8 | Python | false | false | 166 | py | """User settings for a project."""
# Load the system configuration. You can override those settings in this
# module, but beware that doing so might break things.
from .sysconfig import *
| [
"samuel.lotz@salotz.info"
] | samuel.lotz@salotz.info |
62520424d1b4e94fed465462661aee85fa68e102 | b29149eeee6a2351fb2904415ad751b009d80dad | /mopidy_internetarchive/backend.py | d780b3153875b51da53ab9254e34c9b153f71cd1 | [
"Apache-2.0"
] | permissive | tkem/mopidy-internetarchive | 7a33d24f6c32ca9ac040531e5725a1eeadf3fa38 | 2b6100a412120c828da8899b81562237fb808840 | refs/heads/master | 2022-05-06T19:31:44.909430 | 2022-04-03T19:57:11 | 2022-04-03T19:57:11 | 15,807,922 | 15 | 3 | Apache-2.0 | 2021-08-03T23:17:26 | 2014-01-10T19:52:25 | Python | UTF-8 | Python | false | false | 1,338 | py | import pykka
from mopidy import backend, httpclient
import cachetools
from . import Extension
from .client import InternetArchiveClient
from .library import InternetArchiveLibraryProvider
from .playback import InternetArchivePlaybackProvider
def _cache(cache_size=None, cache_ttl=None, **kwargs):
if cache_size is None:
return None
elif cache_ttl is None:
return cachetools.LRUCache(cache_size)
else:
return cachetools.TTLCache(cache_size, cache_ttl)
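# Behaviour sketch (annotation): cache_size=None disables caching,
# cache_size=100 alone yields cachetools.LRUCache(100), and adding
# cache_ttl=600 switches to cachetools.TTLCache(100, 600).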
class InternetArchiveBackend(pykka.ThreadingActor, backend.Backend):
uri_schemes = [Extension.ext_name]
def __init__(self, config, audio):
super().__init__()
ext_config = config[Extension.ext_name]
self.client = client = InternetArchiveClient(
ext_config["base_url"],
retries=ext_config["retries"],
timeout=ext_config["timeout"],
)
product = f"{Extension.dist_name}/{Extension.version}"
client.useragent = httpclient.format_user_agent(product)
proxy = httpclient.format_proxy(config["proxy"])
client.proxies.update({"http": proxy, "https": proxy})
client.cache = _cache(**ext_config)
self.library = InternetArchiveLibraryProvider(ext_config, self)
self.playback = InternetArchivePlaybackProvider(audio, self)
| [
"tkemmer@computer.org"
] | tkemmer@computer.org |
c4f45c81403357ba2cfc207ac9c0e577451983ad | d04d3eec289376e7682403af2f32044b3991d27b | /11 - Common Lectures/BitOper-Lab-2.py | e7114889c50f9aa9975e5de759110ac592361e66 | [] | no_license | m-evtimov96/softUni-python-fundamentals | 190002dbc6196211340126814e8ed4fce3b8a07f | 817a44a3d78130d37e58facfc7bcfdc8af5f4051 | refs/heads/master | 2020-12-10T12:45:27.847764 | 2020-06-23T13:09:43 | 2020-06-23T13:09:43 | 233,598,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 86 | py | number = int(input())
binary_number = bin(number)[2:]
print(binary_number[::-1][1:2])
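# Annotation (not in the original solution): bin(number)[2:] strips the '0b'
# prefix, [::-1] reverses the string so bit 0 comes first, and [1:2] selects
# bit 1 -- i.e. this prints the second-least-significant bit (an empty line
# for inputs below 2).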
| [
"m.evtimov196@gmail.com"
] | m.evtimov196@gmail.com |
fde1091a1bbb0636bbd66e2ab043542168d476e1 | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/domainregistration/v20210101/domain_ownership_identifier.py | fbe26a56590543e739a7b7a4cc922489b15d3e78 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 10,784 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = ['DomainOwnershipIdentifierArgs', 'DomainOwnershipIdentifier']
@pulumi.input_type
class DomainOwnershipIdentifierArgs:
def __init__(__self__, *,
domain_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
kind: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
ownership_id: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a DomainOwnershipIdentifier resource.
:param pulumi.Input[str] domain_name: Name of domain.
:param pulumi.Input[str] resource_group_name: Name of the resource group to which the resource belongs.
:param pulumi.Input[str] kind: Kind of resource.
:param pulumi.Input[str] name: Name of identifier.
:param pulumi.Input[str] ownership_id: Ownership Id.
"""
pulumi.set(__self__, "domain_name", domain_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if kind is not None:
pulumi.set(__self__, "kind", kind)
if name is not None:
pulumi.set(__self__, "name", name)
if ownership_id is not None:
pulumi.set(__self__, "ownership_id", ownership_id)
@property
@pulumi.getter(name="domainName")
def domain_name(self) -> pulumi.Input[str]:
"""
Name of domain.
"""
return pulumi.get(self, "domain_name")
@domain_name.setter
def domain_name(self, value: pulumi.Input[str]):
pulumi.set(self, "domain_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
Name of the resource group to which the resource belongs.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def kind(self) -> Optional[pulumi.Input[str]]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of identifier.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="ownershipId")
def ownership_id(self) -> Optional[pulumi.Input[str]]:
"""
Ownership Id.
"""
return pulumi.get(self, "ownership_id")
@ownership_id.setter
def ownership_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ownership_id", value)
class DomainOwnershipIdentifier(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
domain_name: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
ownership_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Domain ownership Identifier.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] domain_name: Name of domain.
:param pulumi.Input[str] kind: Kind of resource.
:param pulumi.Input[str] name: Name of identifier.
:param pulumi.Input[str] ownership_id: Ownership Id.
:param pulumi.Input[str] resource_group_name: Name of the resource group to which the resource belongs.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: DomainOwnershipIdentifierArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Domain ownership Identifier.
:param str resource_name: The name of the resource.
:param DomainOwnershipIdentifierArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(DomainOwnershipIdentifierArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
domain_name: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
ownership_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = DomainOwnershipIdentifierArgs.__new__(DomainOwnershipIdentifierArgs)
if domain_name is None and not opts.urn:
raise TypeError("Missing required property 'domain_name'")
__props__.__dict__["domain_name"] = domain_name
__props__.__dict__["kind"] = kind
__props__.__dict__["name"] = name
__props__.__dict__["ownership_id"] = ownership_id
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:domainregistration/v20210101:DomainOwnershipIdentifier"), pulumi.Alias(type_="azure-native:domainregistration:DomainOwnershipIdentifier"), pulumi.Alias(type_="azure-nextgen:domainregistration:DomainOwnershipIdentifier"), pulumi.Alias(type_="azure-native:domainregistration/v20150401:DomainOwnershipIdentifier"), pulumi.Alias(type_="azure-nextgen:domainregistration/v20150401:DomainOwnershipIdentifier"), pulumi.Alias(type_="azure-native:domainregistration/v20180201:DomainOwnershipIdentifier"), pulumi.Alias(type_="azure-nextgen:domainregistration/v20180201:DomainOwnershipIdentifier"), pulumi.Alias(type_="azure-native:domainregistration/v20190801:DomainOwnershipIdentifier"), pulumi.Alias(type_="azure-nextgen:domainregistration/v20190801:DomainOwnershipIdentifier"), pulumi.Alias(type_="azure-native:domainregistration/v20200601:DomainOwnershipIdentifier"), pulumi.Alias(type_="azure-nextgen:domainregistration/v20200601:DomainOwnershipIdentifier"), pulumi.Alias(type_="azure-native:domainregistration/v20200901:DomainOwnershipIdentifier"), pulumi.Alias(type_="azure-nextgen:domainregistration/v20200901:DomainOwnershipIdentifier"), pulumi.Alias(type_="azure-native:domainregistration/v20201001:DomainOwnershipIdentifier"), pulumi.Alias(type_="azure-nextgen:domainregistration/v20201001:DomainOwnershipIdentifier"), pulumi.Alias(type_="azure-native:domainregistration/v20201201:DomainOwnershipIdentifier"), pulumi.Alias(type_="azure-nextgen:domainregistration/v20201201:DomainOwnershipIdentifier"), pulumi.Alias(type_="azure-native:domainregistration/v20210115:DomainOwnershipIdentifier"), pulumi.Alias(type_="azure-nextgen:domainregistration/v20210115:DomainOwnershipIdentifier")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(DomainOwnershipIdentifier, __self__).__init__(
'azure-native:domainregistration/v20210101:DomainOwnershipIdentifier',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'DomainOwnershipIdentifier':
"""
Get an existing DomainOwnershipIdentifier resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = DomainOwnershipIdentifierArgs.__new__(DomainOwnershipIdentifierArgs)
__props__.__dict__["kind"] = None
__props__.__dict__["name"] = None
__props__.__dict__["ownership_id"] = None
__props__.__dict__["type"] = None
return DomainOwnershipIdentifier(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def kind(self) -> pulumi.Output[Optional[str]]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource Name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="ownershipId")
def ownership_id(self) -> pulumi.Output[Optional[str]]:
"""
Ownership Id.
"""
return pulumi.get(self, "ownership_id")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
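# --- Usage sketch (illustrative, not part of the generated SDK) ---
# A minimal Pulumi program registering a domain-ownership verification
# record; the resource group, domain, and ownership id are placeholders,
# and the versioned module path is assumed from this file's resource type.
#
#   import pulumi_azure_native as azure_native
#
#   ident = azure_native.domainregistration.v20210101.DomainOwnershipIdentifier(
#       "verification",
#       resource_group_name="example-rg",               # placeholder
#       domain_name="example.com",                      # placeholder
#       ownership_id="ms-domain-verification=abc123")   # placeholder
#
# Existing identifiers can be adopted with the static get() method, given
# the provider id of the resource.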
| [
"noreply@github.com"
] | morrell.noreply@github.com |
9edbd3d9de0f548de026e32489b971d5050b5d26 | 519f1ac2b8ca9ee2793af13a88eec6eef7c2637d | /rosalind/GCON.py | 7c3bd9f98bccce480c6b529763717996320e61b3 | [] | no_license | teju85/programming | c4da3493b4cf96b8f52da9bb209636cd898310a5 | 5d64b3f5cc868f7a5ad1bac889d69da9dbe356cd | refs/heads/master | 2021-06-16T07:09:25.159021 | 2017-06-05T04:36:07 | 2017-06-05T04:36:07 | 26,383,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,499 | py | import sys
from common import readFasta
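# Rosalind GCON -- Global Alignment with Constant Gap Penalty: every gap,
# regardless of its length, is charged gapPenalty exactly once. Residue
# pairs are scored with the BLOSUM62 matrix defined below.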
syms = {
'A' : 0,
'C' : 1,
'D' : 2,
'E' : 3,
'F' : 4,
'G' : 5,
'H' : 6,
'I' : 7,
'K' : 8,
'L' : 9,
'M' : 10,
'N' : 11,
'P' : 12,
'Q' : 13,
'R' : 14,
'S' : 15,
'T' : 16,
'V' : 17,
'W' : 18,
'Y' : 19,
}
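# BLOSUM62 substitution matrix: score[syms[a]][syms[b]] is the score for
# aligning amino acids a and b.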
score = [
[ 4, 0, -2, -1, -2, 0, -2, -1, -1, -1, -1, -2, -1, -1, -1, 1, 0, 0, -3, -2],
[ 0, 9, -3, -4, -2, -3, -3, -1, -3, -1, -1, -3, -3, -3, -3, -1, -1, -1, -2, -2],
[-2, -3, 6, 2, -3, -1, -1, -3, -1, -4, -3, 1, -1, 0, -2, 0, -1, -3, -4, -3],
[-1, -4, 2, 5, -3, -2, 0, -3, 1, -3, -2, 0, -1, 2, 0, 0, -1, -2, -3, -2],
[-2, -2, -3, -3, 6, -3, -1, 0, -3, 0, 0, -3, -4, -3, -3, -2, -2, -1, 1, 3],
[ 0, -3, -1, -2, -3, 6, -2, -4, -2, -4, -3, 0, -2, -2, -2, 0, -2, -3, -2, -3],
[-2, -3, -1, 0, -1, -2, 8, -3, -1, -3, -2, 1, -2, 0, 0, -1, -2, -3, -2, 2],
[-1, -1, -3, -3, 0, -4, -3, 4, -3, 2, 1, -3, -3, -3, -3, -2, -1, 3, -3, -1],
[-1, -3, -1, 1, -3, -2, -1, -3, 5, -2, -1, 0, -1, 1, 2, 0, -1, -2, -3, -2],
[-1, -1, -4, -3, 0, -4, -3, 2, -2, 4, 2, -3, -3, -2, -2, -2, -1, 1, -2, -1],
[-1, -1, -3, -2, 0, -3, -2, 1, -1, 2, 5, -2, -2, 0, -1, -1, -1, 1, -1, -1],
[-2, -3, 1, 0, -3, 0, 1, -3, 0, -3, -2, 6, -2, 0, 0, 1, 0, -3, -4, -2],
[-1, -3, -1, -1, -4, -2, -2, -3, -1, -3, -2, -2, 7, -1, -2, -1, -1, -2, -4, -3],
[-1, -3, 0, 2, -3, -2, 0, -3, 1, -2, 0, 0, -1, 5, 1, 0, -1, -2, -2, -1],
[-1, -3, -2, 0, -3, -2, 0, -3, 2, -2, -1, 0, -2, 1, 5, -1, -1, -3, -3, -2],
[ 1, -1, 0, 0, -2, 0, -1, -2, 0, -2, -1, 1, -1, 0, -1, 4, 1, -2, -3, -2],
[ 0, -1, -1, -1, -2, -2, -2, -1, -1, -1, -1, 0, -1, -1, -1, 1, 5, 0, -2, -2],
[ 0, -1, -3, -2, -1, -3, -3, 3, -2, 1, 1, -3, -2, -2, -3, -2, 0, 4, -3, -1],
[-3, -2, -4, -3, 1, -2, -2, -3, -3, -2, -1, -4, -4, -2, -3, -3, -2, -3, 11, 2],
[-2, -2, -3, -2, 3, -3, 2, -1, -2, -1, -1, -2, -3, -1, -2, -2, -2, -1, 2, 7]
]
gapPenalty = -5
def editDistance(s, t):
    """Return the maximum global-alignment score of s and t when every
    gap, however long, is charged gapPenalty exactly once. (Despite the
    name, this maximizes a similarity score rather than minimizing an
    edit distance.)"""
    ls = len(s) + 1
    lt = len(t) + 1
    # mat[i][j]: best score aligning s[:i] with t[:j].
    # gap[i][j]: True if that best alignment ends inside a gap, so the
    # gap can be extended for free. A single boolean cannot distinguish
    # a gap in s from a gap in t; the three-matrix sketch below tracks
    # the two cases exactly.
    mat = [[0 for j in range(0, lt)] for i in range(0, ls)]
    gap = [[False for j in range(0, lt)] for i in range(0, ls)]
    for i in range(1, ls):
        mat[i][0] = gapPenalty
        gap[i][0] = True
    for j in range(1, lt):
        mat[0][j] = gapPenalty
        gap[0][j] = True
    for i in range(1, ls):
        sa = s[i-1]
        for j in range(1, lt):
            ta = t[j-1]
            scoreVal = score[syms[sa]][syms[ta]]
            # Opening a new gap costs gapPenalty; extending one is free.
            a = mat[i-1][j] if gap[i-1][j] else mat[i-1][j] + gapPenalty
            b = mat[i][j-1] if gap[i][j-1] else mat[i][j-1] + gapPenalty
            c = mat[i-1][j-1] + scoreVal
            best = max(a, b, c)
            mat[i][j] = best
            if best == a or best == b:
                gap[i][j] = True
    # Debug helper: dump the DP matrices.
    # for row in mat:
    #     print(' '.join(str(v) for v in row))
    return mat[-1][-1]
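# An exact alternative (a sketch reusing the syms/score/gapPenalty globals
# above, not part of the original solution): Gotoh-style DP with three
# matrices, so gaps in s and gaps in t are tracked separately and adjacent
# gaps in opposite sequences are each charged once.
def constant_gap_score(s, t):
    NEG = float('-inf')
    ls, lt = len(s) + 1, len(t) + 1
    # M: alignment ends with s[i-1] paired to t[j-1].
    # X: alignment ends with s[i-1] paired to a gap.
    # Y: alignment ends with t[j-1] paired to a gap.
    M = [[NEG] * lt for _ in range(ls)]
    X = [[NEG] * lt for _ in range(ls)]
    Y = [[NEG] * lt for _ in range(ls)]
    M[0][0] = 0
    for i in range(1, ls):
        X[i][0] = gapPenalty  # a leading gap of any length costs once
    for j in range(1, lt):
        Y[0][j] = gapPenalty
    for i in range(1, ls):
        for j in range(1, lt):
            sub = score[syms[s[i-1]]][syms[t[j-1]]]
            M[i][j] = max(M[i-1][j-1], X[i-1][j-1], Y[i-1][j-1]) + sub
            X[i][j] = max(M[i-1][j] + gapPenalty,   # open a gap
                          X[i-1][j],                # extend for free
                          Y[i-1][j] + gapPenalty)   # switch gap side
            Y[i][j] = max(M[i][j-1] + gapPenalty,
                          Y[i][j-1],
                          X[i][j-1] + gapPenalty)
    return max(M[-1][-1], X[-1][-1], Y[-1][-1])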
if __name__ == '__main__':
    dnas = readFasta(sys.argv[1])
    print(editDistance(dnas[0][1], dnas[1][1]))
| [
"rao.thejaswi@gmail.com"
] | rao.thejaswi@gmail.com |