blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c291ea9fcc40f9a2104f72fda45a772db6fc67c3 | 6fce07f704880861ed472706cce973ff81be9ca4 | /tests/test_threadutils.py | bdb8ac5bf21cd304d9f63fe29c2823f749a52ad5 | [
"MIT"
] | permissive | AbdulSaleh/dialog-probing | 6645d5c2be10dc0342d6f6c7a768e46e4080c068 | 12a04e7ca3363d428aca96e8c2c2ce2ec518a767 | refs/heads/master | 2023-04-08T22:29:13.531668 | 2020-06-17T19:27:49 | 2020-06-17T19:27:49 | 210,482,746 | 9 | 2 | MIT | 2023-03-24T23:29:34 | 2019-09-24T01:14:42 | Python | UTF-8 | Python | false | false | 3,078 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from parlai.core.thread_utils import SharedTable
from multiprocessing import Process
import parlai.core.testing_utils as testing_utils
import unittest
import random
import time
@testing_utils.skipIfGPU
class TestSharedTable(unittest.TestCase):
    """Unit tests for parlai.core.thread_utils.SharedTable.

    Covers dict-style construction, item get/set/delete semantics,
    key iteration, locked concurrent increments across processes, and
    storing torch tensors (CPU, and CUDA when available).
    """

    def test_init_from_dict(self):
        # Construction from a plain dict must preserve every key/value pair.
        d = {'a': 0, 'b': 1, 'c': 1.0, 'd': True, 1: False, 2: 2.0}
        st = SharedTable(d)
        for k, v in d.items():
            assert st[k] == v

    def test_get_set_del(self):
        st = SharedTable({'key': 0})
        # Reading a key that was never registered must raise KeyError.
        try:
            st['none']
            self.fail('did not fail on nonexistent key')
        except KeyError:
            pass
        st['key'] = 1
        assert st['key'] == 1
        st['key'] += 1
        assert st['key'] == 2
        # The value type is fixed at creation time: assigning a float to a
        # slot created as int must raise TypeError.
        try:
            st['key'] = 2.1
            self.fail('cannot change type of value for set keys')
        except TypeError:
            pass
        del st['key']
        assert 'key' not in st, 'key should have been removed from table'
        # A deleted key cannot be re-created afterwards.
        try:
            st['key'] = True
            self.fail('cannot change removed key')
        except KeyError:
            pass

    def test_iter_keys(self):
        st = SharedTable({'key': 0, 'ctr': 0.0, 'val': False, 'other': 1})
        assert len(st) == 4
        del st['key']
        assert len(st) == 3, 'length should decrease after deleting key'
        # iter(table) and table.keys() must agree on the remaining keys.
        keyset1 = set(iter(st))
        keyset2 = set(st.keys())
        assert keyset1 == keyset2, 'iterating should return keys'
        assert len(keyset1) == 3, ''

    def test_concurrent_access(self):
        st = SharedTable({'cnt': 0})

        def inc():
            # 50 increments per worker, each under the table's lock; the
            # small random sleep encourages interleaving between workers.
            for _ in range(50):
                with st.get_lock():
                    st['cnt'] += 1
                time.sleep(random.randint(1, 5) / 10000)

        threads = []
        for _ in range(5):  # numthreads
            threads.append(Process(target=inc))
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        # 5 processes x 50 locked increments each must never lose an update.
        assert st['cnt'] == 250

    def test_torch(self):
        try:
            import torch
        except ImportError:
            # pass by default if no torch available
            return
        st = SharedTable({'a': torch.FloatTensor([1]), 'b': torch.LongTensor(2)})
        assert st['a'][0] == 1.0
        assert len(st) == 2
        assert 'b' in st
        del st['b']
        assert 'b' not in st
        assert len(st) == 1
        # Repeat the same checks with CUDA tensors when a GPU is present.
        if torch.cuda.is_available():
            st = SharedTable(
                {'a': torch.cuda.FloatTensor([1]), 'b': torch.cuda.LongTensor(2)}
            )
            assert st['a'][0] == 1.0
            assert len(st) == 2
            assert 'b' in st
            del st['b']
            assert 'b' not in st
            assert len(st) == 1
if __name__ == '__main__':
unittest.main()
| [
"a_saleh@matlaber5.media.mit.edu"
] | a_saleh@matlaber5.media.mit.edu |
4dc6472a0122338c0b83a65c1f1562003a7e74ba | 204db0d292bfe63253f737e6a73c443c0359b629 | /HitAnalyzer/test/draw_clustersize.py | 492ee21a02d8d12bd42b8b20cc2db6eee1db8939 | [] | no_license | gitytakahas/DPGAnalysis-SiPixelTools | 69d6de11974be421086ccb19d6fe760ddec986ae | 9088daece2f85f4fd0f5def51cdb30203f4b3b23 | refs/heads/master | 2021-01-17T08:31:08.047601 | 2017-06-28T14:38:01 | 2017-06-28T14:38:01 | 64,846,554 | 0 | 0 | null | 2016-08-03T13:04:29 | 2016-08-03T13:04:29 | null | UTF-8 | Python | false | false | 3,935 | py | from officialStyle import officialStyle
from ROOT import TFile, TTree, TH2F, TCanvas, gROOT, gStyle, TH1F, TLegend
import copy
gROOT.SetBatch(True)
officialStyle(gStyle)
gStyle.SetOptTitle(0)
def LegendSettings(leg):
    # Apply the house style to a ROOT TLegend in place: no border,
    # transparent background, small labels in ROOT font 42.
    leg.SetBorderSize(0)
    leg.SetFillColor(10)
    leg.SetLineColor(0)
    leg.SetFillStyle(0)
    leg.SetTextSize(0.03)
    leg.SetTextFont(42)
#type='zerobias'
#type='random'

# Barrel pixel layers to process; the following lists are parallel
# per-layer settings (xmax/ladder/lmax are unused below — presumably kept
# for related scripts; TODO confirm).
layers = [1,2,3,4]
#layers = [1]
xmax = [10, 3, 2, 1]
types = ['random', 'zerobias']
ladder = [13, 29, 45, 65]
lmax = [6.5, 14.5, 22.5, 32.5]

# Maps trigger type -> list of per-layer (cluster size vs z) TH2F histograms.
h_occupancy = {}

for type in types:  # NOTE: loop variable shadows the builtin `type`
    file = TFile('Myroot_' + type + '.root')
    tree = file.Get('cluster_tree')
    h_occupancy_ = []
    for layer in layers:
        hname = 'hist_L' + str(layer)
        # hist = TH2F(hname, hname, 56, -28, 28, 10000,0,10000)
        hist = TH2F(hname, hname, 20, -28, 28, 10000,0,10000)
        hist.GetXaxis().SetTitle('Z (mm)')
        hist.GetYaxis().SetTitle('Cluster size')
        # Fill cluster size (ch) vs z position for barrel (subid==1) hits
        # of this layer.
        tree.Draw("ch:zPos >> " + hname, "subid==1 && layer==" + str(layer))
        cname = 'canvas_' + str(layer)
        canvas = TCanvas(cname)
        canvas.SetGridx()
        canvas.SetGridy()
        hist.Draw('colz')
        hist_occ = hist.ProfileX()
        hist_occ.GetYaxis().SetNdivisions(505)
        hist_occ.Sumw2()
        hist_occ.SetLineColor(1)
        # hist_occ.Draw('psame')
        canvas.SaveAs('plot/cluster_L'+str(layer) + '_' + type + '.gif')

        ## zoom
        hname_zoom = 'hist_zoom_L' + str(layer)
        hist_zoom = TH2F(hname_zoom, hname_zoom, 20, -28, 28, 100,0,200)
        hist_zoom.GetXaxis().SetTitle('Z (mm)')
        hist_zoom.GetYaxis().SetTitle('Cluster size')
        tree.Draw("ch:zPos >> " + hname_zoom, "subid==1 && layer==" + str(layer))
        cname_zoom = 'canvas_zoom_' + str(layer)
        canvas_zoom = TCanvas(cname_zoom)
        canvas_zoom.SetGridx()
        canvas_zoom.SetGridy()
        hist_zoom.Draw('colz')
        # hist_occ.Draw('psame')
        # Overlay a candle (box) plot of the full-range histogram.
        hist.Draw('candlex(10000311) same')
        canvas_zoom.SaveAs('plot/cluster_zoom_L'+str(layer) + '_' + type + '.gif')
        # h_occupancy_.append(copy.deepcopy(hist_zoom))
        h_occupancy_.append(copy.deepcopy(hist))
        # h_occupancy_.append(copy.deepcopy(hist_occ))
    h_occupancy[type] = h_occupancy_

print h_occupancy  # Python 2 print statement

# LegendSettings(leg,len(hists))
gStyle.SetPadRightMargin(0.1)
gStyle.SetPadLeftMargin(0.18)
# Reverse so 'zerobias' is drawn first (gets color/style index 1).
types.reverse()

# One comparison canvas per layer: overlay both trigger types as candle plots.
for layer in layers:
    cname = 'occupancy_' + str(layer)
    canvas_layer = TCanvas(cname)
    leg = TLegend(0.5,0.7,0.9,0.9)
    LegendSettings(leg)
    for index, type in enumerate(types):
        # h_occupancy[type][layer-1].Scale(1./h_occupancy[type][layer-1].GetSumOfWeights())
        h_occupancy[type][layer-1].SetLineWidth(2)
        h_occupancy[type][layer-1].SetLineColor(index+1)
        h_occupancy[type][layer-1].SetMarkerColor(index+1)
        h_occupancy[type][layer-1].SetLineStyle(index+1)
        h_occupancy[type][layer-1].GetXaxis().SetTitle('Z (mm)')
        h_occupancy[type][layer-1].GetYaxis().SetTitle('Cluster size')
        h_occupancy[type][layer-1].GetYaxis().SetTitleOffset(1.5)
        h_occupancy[type][layer-1].GetYaxis().SetRangeUser(0,200)
        h_occupancy[type][layer-1].SetMaximum(h_occupancy[type][layer-1].GetMaximum()*1.5)
        h_occupancy[type][layer-1].SetMinimum(0)
        if index==0:
            h_occupancy[type][layer-1].Draw('h')
            h_occupancy[type][layer-1].Draw('candlex(10000311)')
            # leg.AddEntry(h_occupancy[type][layer-1], 'Layer'+str(layer), '')
        else:
            # h_occupancy[type][layer-1].Draw('hsame')
            h_occupancy[type][layer-1].Draw('candlex(10000311) same')
        leg.AddEntry(h_occupancy[type][layer-1], type, 'lep')
    leg.Draw()
    canvas_layer.SaveAs('plot/cluster_profile_L' + str(layer) + '.gif')
| [
"Yuta.Takahashi@cern.ch"
] | Yuta.Takahashi@cern.ch |
8c54cbf7d9b12f2b8648a69b8b076a0ef55f1036 | 7dcd8ca463f3d0d727ed631a35ef112d38d193f2 | /Python/3. Image Processing/negative.py | f4a0f8d1cfbd8b719dda6bc5ee456fa1302eff0d | [
"MIT"
] | permissive | shoaibrayeen/Data-Science-With-Python-And-R | 03b38da9e8b0ebead34c51efa44f7e5052f773c4 | 2f4f398a2ea414395c4ff04b38c777f96f78bab2 | refs/heads/master | 2021-07-10T23:38:10.627283 | 2020-10-06T05:02:32 | 2020-10-06T05:02:32 | 199,718,898 | 0 | 1 | MIT | 2020-10-06T05:02:33 | 2019-07-30T19:59:58 | Jupyter Notebook | UTF-8 | Python | false | false | 263 | py | import numpy as np
from PIL import Image
img = Image.open("./Image/boat.png")
img_array=np.array(img,dtype=np.float32)
img_array[:,:] =255-img_array[:,:]
img_array = img_array.astype(np.uint8)
img=Image.fromarray(img_array)
img.save("./Image/boatNegative.png")
| [
"noreply@github.com"
] | shoaibrayeen.noreply@github.com |
f17f725e2579c9a17a0f4ca272b528a7a3edb257 | 83830aff551b9f9c13a24d602c26cdc6559f2bd2 | /gozerplugs/shakespear.py | 642132cd7800703c2a784e6a17f02b399c51a8fa | [] | no_license | polichism/my-gozerbot | 8182a826aec731e49d44c595fd1dc7837e811db5 | ea86f2b7713457fc7a73f1227b969b230debda48 | refs/heads/master | 2021-01-17T18:03:23.135742 | 2014-04-04T11:44:28 | 2014-04-04T11:44:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,985 | py | # plugs/shakespear.py
#
#
""" uses the random lib """
__copyright__ = 'this file is in the public domain'
__revision__ = '$Id: shakespeare.py 517 2006-12-21 05:00:00Z deck $'
from gozerbot.generic import handle_exception
from gozerbot.commands import cmnds
from gozerbot.examples import examples
from gozerbot.plughelp import plughelp
import re, random
plughelp.add('shakespear', 'display a shakespearean insult')
set_a=["Away I say",
"Bathe thyself",
"Be not deaf",
"Behold thy mirror",
"Beware my sting",
"Clean thine ears",
"Drink up eisel",
"Eat a crododile",
"Eat my knickers",
"Fie upon thee",
"Forsooth say I",
"Get thee gone",
"Get thee hence",
"Grow unsightly warts",
"Hear me now",
"Hear this pox alert",
"I'll see thee hang'd",
"Kiss my codpiece",
"Lead apes in hell",
"Methinks you stinks",
"My finger in thine eye",
">>Phui<< I say",
"Remove thine ass hence",
"Resign not thy day gig",
"Sit thee on a spit",
"Sorrow on thee",
"Swim with leeches",
"Thou dost intrude",
"Thy mother wears armor",
"Trip on thy sword",
"Tune thy lute",
"Why, how now putz",
"Wipe thy ugly face"]
set_b=["artless",
"bawdy",
"beslubbering",
"bootless",
"cankerous",
"churlish",
"cockered",
"clouted",
"craven",
"currish",
"dankish",
"dissembling",
"droning",
"errant",
"fawning",
"fobbing",
"fool-born",
"froward",
"frothy",
"gleeking",
"goatish",
"gorbellied",
"ill-nurtured",
"impertinent",
"incestuous",
"incurable",
"infectious",
"jarring",
"loggerheaded",
"lumpish",
"loutish",
"mammering",
"mangled",
"mewling",
"paunchy",
"pribbling",
"puking",
"puny",
"qualling",
"rank",
"reeky",
"roguish",
"rump-fed",
"ruttish",
"saucy",
"spleeny",
"spongy",
"surly",
"tardy-gaited",
"tottering",
"unmuzzled",
"vain",
"venomed",
"warped",
"wayward",
"weedy",
"whoreson",
"wretched",
"yeasty"]
set_c=["addlepated",
"base-court",
"bat-fowling",
"beef-witted",
"beetle-headed",
"boil-brained",
"clapper-clawed",
"clay-brained",
"codpiece-sniffing",
"common-kissing",
"crook-pated",
"dismal-dreaming",
"dizzy-eyed",
"doghearted",
"dread-bolted",
"earth-vexing",
"elf-skinned",
"fat-kidneyed",
"fen-sucked",
"flap-mouthed",
"fly-bitten",
"folly-fallen",
"fool-born",
"foul-practicing",
"full-gorged",
"guts-griping",
"half-faced",
"hasty-witted",
"hedge-born",
"hell-hated",
"idle-headed",
"ill-breeding",
"ill-nurtured",
"knotty-pated",
"mad-brained",
"milk-livered",
"motley-minded",
"onion-eyed",
"plume-plucked",
"pottle-deep",
"pox-marked",
"reeling-ripe",
"rough-hewn",
"rude-growing",
"rump-fed",
"shard-borne",
"sheep-biting",
"spur-galled",
"swag-bellied",
"tardy-gaited",
"tickle-brained",
"toad-spotted",
"unchin-snouted",
"weather-bitten"]
set_d=["apple-john",
"baggage",
"barnacle",
"bladder",
"boar-pig",
"bugbear",
"bum-bailey",
"canker-blossom",
"clack-dish",
"clotpole",
"coxcomb",
"codpiece",
"death-token",
"dewberry",
"dotard",
"flap-dragon",
"flax-wench",
"flea",
"flirt-gill",
"foot-licker",
"fustilarian",
"giglet",
"gudgeon",
"haggard",
"harpy",
"hedge-pig",
"horn-beast",
"hugger-mugger",
"jolthead",
"knave",
"lewdster",
"lout",
"maggot-pie",
"malt-worm",
"mammet",
"measle",
"minnow",
"miscreant",
"moldwarp",
"mumble-news",
"nit",
"nut-hook",
"pigeon-egg",
"pignut",
"pumpion",
"puttock",
"ratsbane",
"rudesby",
"scut",
"skainsmate",
"strumpet",
"varlot",
"vassal",
"wagtail",
"water-fly",
"whey-face",
"winter-cricket"]
def handle_insult(bot, ievent):
    """Reply to the event with a randomly assembled Shakespearean insult."""
    # One random pick from each word list, joined with single spaces —
    # same random.choice call order as before.
    parts = [random.choice(words) for words in (set_a, set_b, set_c, set_d)]
    ievent.reply(" ".join(parts))
cmnds.add('insult', handle_insult, 'USER')
examples.add('insult', 'show a shakespearean insult', 'insult')
| [
"blaxter@gmail.com"
] | blaxter@gmail.com |
163a84077a6b26e6de2ab3e58360644ced3eac16 | 043160352216a7fc21be4c8a44507e00f523bf80 | /test/functional/rpc_spork.py | 1f5f0298165fba056629b95302833155549086b0 | [
"MIT"
] | permissive | odinyblockchain/odinycoin | 5ef2a1bca374230882c91e8c6717bbb8faf889ad | 183751aac9357455913f1d8a415b1dcb04225ee0 | refs/heads/master | 2022-12-18T14:14:02.535216 | 2020-09-20T22:05:14 | 2020-09-20T22:05:14 | 295,208,711 | 0 | 2 | MIT | 2020-09-18T10:33:17 | 2020-09-13T18:06:52 | C++ | UTF-8 | Python | false | false | 2,830 | py | #!/usr/bin/env python3
# Copyright (c) 2019 The Odinycoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# -*- coding: utf-8 -*-
from time import sleep
from test_framework.test_framework import OdinycoinTestFramework
from test_framework.util import set_node_times, assert_equal
class Odinycoin_RPCSporkTest(OdinycoinTestFramework):
    """Functional test for the spork RPC: activate, deactivate and set a
    spork value from the key-holding node and verify it propagates to the
    other node and survives a restart."""

    def set_test_params(self):
        # Two staking nodes on a fresh chain; only node 0 holds the spork
        # private key and may therefore sign spork updates.
        self.setup_clean_chain = True
        self.num_nodes = 2
        self.extra_args = [['-staking=1']] * self.num_nodes
        self.extra_args[0].append('-sporkkey=932HEevBSujW2ud7RfB1YF91AFygbBRQj3de3LyaCRqNzKKgWXi')

    def setup_chain(self):
        # Start with clean chain
        self._initialize_chain_clean()
        self.enable_mocktime()

    def log_title(self):
        # Pretty banner in the test log.
        title = "*** Starting %s ***" % self.__class__.__name__
        underline = "-" * len(title)
        description = "Performs tests on the Spork RPC"
        self.log.info("\n\n%s\n%s\n%s\n", title, underline, description)

    def run_test(self):
        self.log_title()
        set_node_times(self.nodes, self.mocktime)
        sporkName = "SPORK_8_MASTERNODE_PAYMENT_ENFORCEMENT"
        # 0 - check SPORK 8 status from node 1 (must be inactive)
        assert_equal(False, self.is_spork_active(1, sporkName))
        # 1 - activate SPORK 8 with nodes[0]
        assert_equal("success", self.activate_spork(0, sporkName))
        sleep(1)  # allow the spork message to relay to node 1
        # check SPORK 8 status from nodes[1] (must be active)
        assert_equal(True, self.is_spork_active(1, sporkName))
        # 2 - Adjust time to 1 sec in the future and deactivate SPORK 8 with node[0]
        self.mocktime += 1
        set_node_times(self.nodes, self.mocktime)
        assert_equal("success", self.deactivate_spork(0, sporkName))
        sleep(1)
        # check SPORK 8 value from nodes[1] (must be inactive again)
        assert_equal(False, self.is_spork_active(1, sporkName))
        # 3 - Adjust time to 1 sec in the future and set new value (mocktime) for SPORK 8 with node[0]
        self.mocktime += 1
        set_node_times(self.nodes, self.mocktime)
        assert_equal("success", self.set_spork(0, sporkName, self.mocktime))
        sleep(1)
        # check SPORK 8 value from nodes[1] (must be equal to mocktime)
        assert_equal(self.mocktime, self.get_spork(1, sporkName))
        # 4 - Stop nodes and check value again after restart (persistence)
        self.log.info("Stopping nodes...")
        self.stop_nodes()
        self.log.info("Restarting node 1...")
        self.start_node(1, [])
        assert_equal(self.mocktime, self.get_spork(1, sporkName))
        self.log.info("%s: TEST PASSED" % self.__class__.__name__)
if __name__ == '__main__':
Odinycoin_RPCSporkTest().main()
| [
"71228635+odinyblockchain@users.noreply.github.com"
] | 71228635+odinyblockchain@users.noreply.github.com |
5324cccd48635b974ca2c7204c7c9e487799df0a | 6b14d9a64a578239e5612e6098320b61b45c08d9 | /OCT16/02.py | 57cc74270ae7b19782b96a16b7b3c68a44bd1fac | [
"MIT"
] | permissive | Razdeep/PythonSnippets | 498c403140fec33ee2f0dd84801738f1256ee9dd | 76f9313894f511c487a99bc38bdf0fe5e594caf5 | refs/heads/master | 2020-03-26T08:56:23.067022 | 2018-11-26T05:36:36 | 2018-11-26T05:36:36 | 144,726,845 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | py | # Styling graphs
import matplotlib.pyplot as plt
plt.plot([1,2,3,4],[5,8,7,25],'r--')
# plt.plot([1,2,3,4],[5,8,7,25],'g^') # Shows green triangles
plt.title('Rain in december')
plt.xlabel('Days in december')
plt.ylabel('Inches in rain')
plt.show() | [
"rrajdeeproychowdhury@gmail.com"
] | rrajdeeproychowdhury@gmail.com |
42824fb36a1dc24acbcb8076fba9574ee8f0bf72 | bc047ab30357479f40f2106af46d9e0c0c1a8bb4 | /accounts/migrations/0008_auto_20200811_1457.py | 5335030c4d28fd3badc63dff70c2fb0ccfad61d6 | [] | no_license | kahdichienja/uniminus2 | 10c838b450ce1c3e2f0f5b840cc060e6fa26a418 | d9243f1654432d16697f4f6d4a8206c2a4179541 | refs/heads/master | 2022-11-30T05:03:38.243675 | 2020-08-11T13:55:35 | 2020-08-11T13:55:35 | 285,979,880 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | # Generated by Django 2.1.7 on 2020-08-11 14:57
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: makes PersonalFileUpload.kcse_cert optional
    # (blank/null allowed) and stores uploads under MEDIA_ROOT/kcsecert.

    dependencies = [
        ('accounts', '0007_personalfileupload'),
    ]

    operations = [
        migrations.AlterField(
            model_name='personalfileupload',
            name='kcse_cert',
            field=models.FileField(blank=True, null=True, upload_to='kcsecert'),
        ),
    ]
| [
"ago@localhost.localdomain"
] | ago@localhost.localdomain |
ef1098dfcbe246827541db2aefd6c3f8ecc8f4de | 32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd | /benchmark/tapandturn/testcase/firstcases/testcase7_006.py | b91bd32806c9939333a62d141c1f6f768e4e7c5c | [] | no_license | Prefest2018/Prefest | c374d0441d714fb90fca40226fe2875b41cf37fc | ac236987512889e822ea6686c5d2e5b66b295648 | refs/heads/master | 2021-12-09T19:36:24.554864 | 2021-12-06T12:46:14 | 2021-12-06T12:46:14 | 173,225,161 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,520 | py | #coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
desired_caps = {
'platformName' : 'Android',
'deviceName' : 'Android Emulator',
'platformVersion' : '4.4',
'appPackage' : 'com.gabm.tapandturn',
'appActivity' : 'com.gabm.tapandturn.ui.MainActivity',
'resetKeyboard' : True,
'androidCoverage' : 'com.gabm.tapandturn/com.gabm.tapandturn.JacocoInstrumentation',
'noReset' : True
}
def command(cmd, timeout=5):
    # Fire-and-forget shell command: give it `timeout` seconds to run, then
    # terminate it.  Output is captured into a pipe and discarded.
    # NOTE(review): shell=True — only safe because every caller passes a
    # hard-coded adb command string, never user input.
    p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
    time.sleep(timeout)
    p.terminate()
    return
def getElememt(driver, str) :
    # Poll for the UiAutomator selector up to 5 times (1 s apart); as a
    # last resort tap (50, 50) to dismiss a possible popup and look once
    # more (this final lookup may raise).  `str` shadows the builtin.
    for i in range(0, 5, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str)
        except NoSuchElementException:
            time.sleep(1)
        else:
            return element
    os.popen("adb shell input tap 50 50")
    element = driver.find_element_by_android_uiautomator(str)
    return element
def getElememtBack(driver, str1, str2) :
    # Try the primary selector `str1` twice, then fall back to `str2`
    # (up to 5 tries); finally tap (50, 50) to clear a possible popup and
    # retry `str2` once more (may raise).
    for i in range(0, 2, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str1)
        except NoSuchElementException:
            time.sleep(1)
        else:
            return element
    for i in range(0, 5, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str2)
        except NoSuchElementException:
            time.sleep(1)
        else:
            return element
    os.popen("adb shell input tap 50 50")
    element = driver.find_element_by_android_uiautomator(str2)
    return element
def swipe(driver, startxper, startyper, endxper, endyper) :
    # Swipe between two points given as fractions (0..1) of the screen
    # size; retry once after 1 s if the first attempt raises.
    size = driver.get_window_size()
    width = size["width"]
    height = size["height"]
    try:
        driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
                     end_y=int(height * endyper), duration=1000)
    except WebDriverException:
        time.sleep(1)
        driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
                     end_y=int(height * endyper), duration=1000)
    return
def scrollToFindElement(driver, str) :
    # Scroll down (up to 5 screens) and then back up (up to 4) looking for
    # the selector; when several matches exist prefer an enabled one.
    # Returns None when the element is never found.
    for i in range(0, 5, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str)
            elements = driver.find_elements_by_android_uiautomator(str)
            if (len(elements) > 1) :
                for temp in elements :
                    if temp.get_attribute("enabled") == "true" :
                        element = temp
                        break
        except NoSuchElementException:
            # Not visible yet: swipe upward to scroll the content down.
            swipe(driver, 0.5, 0.55, 0.5, 0.2)
        else :
            return element
    for i in range(0, 4, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str)
            elements = driver.find_elements_by_android_uiautomator(str)
            if (len(elements) > 1):
                for temp in elements:
                    if temp.get_attribute("enabled") == "true":
                        element = temp
                        break
        except NoSuchElementException:
            # Scroll back in the opposite direction.
            swipe(driver, 0.5, 0.2, 0.5, 0.55)
        else :
            return element
    return
def scrollToClickElement(driver, str):
    """Scroll until the selector is found, then click it (no-op if absent)."""
    target = scrollToFindElement(driver, str)
    if target is not None:
        target.click()
def clickInList(driver, str) :
    # Click an entry in a list dialog.  With no selector, pick the last
    # CheckedTextView (only when a dialog overlays the app); otherwise
    # scroll to the selector.  If nothing is found and a dialog is open,
    # back out with the BACK key (keycode 4).
    element = None
    if (str is None) :
        candidates = driver.find_elements_by_class_name("android.widget.CheckedTextView")
        if len(candidates) >= 1 and checkWindow(driver):
            element = candidates[len(candidates)-1]
    else :
        element = scrollToFindElement(driver, str)
    if element is not None :
        element.click()
    else :
        if checkWindow(driver) :
            driver.press_keycode(4)
def clickOnCheckable(driver, str, value = "true") :
    # Find the LinearLayout row that contains the selector and toggle its
    # checkable child until its 'checked' attribute equals `value`
    # ("true"/"false").  Only rows with exactly one nested LinearLayout
    # are considered.
    parents = driver.find_elements_by_class_name("android.widget.LinearLayout")
    for parent in parents:
        try :
            parent.find_element_by_android_uiautomator(str)
            lists = parent.find_elements_by_class_name("android.widget.LinearLayout")
            if len(lists) == 1 :
                innere = parent.find_element_by_android_uiautomator("new UiSelector().checkable(true)")
                nowvalue = innere.get_attribute("checked")
                if (nowvalue != value) :
                    innere.click()
                break
        except NoSuchElementException:
            continue
def typeText(driver, value) :
    # Replace the text of the first EditText, then confirm with the "OK"
    # button or, if no OK button exists and a dialog is open, press BACK.
    element = getElememt(driver, "new UiSelector().className(\"android.widget.EditText\")")
    element.clear()
    element.send_keys(value)
    enterelement = getElememt(driver, "new UiSelector().text(\"OK\")")
    if (enterelement is None) :
        if checkWindow(driver):
            driver.press_keycode(4)
    else :
        enterelement.click()
def checkWindow(driver):
    """Return True when a dialog overlays the app, i.e. the top
    FrameLayout is shorter than the full driver window."""
    full_height = driver.get_window_size()['height']
    frame_height = driver.find_element_by_class_name("android.widget.FrameLayout").size['height']
    return full_height > frame_height
def testingSeekBar(driver, str, value):
    # Locate a SeekBar (the one nearest the selector, or the dialog's own
    # bar when a dialog is open), drag it to `value` (fraction 0..1) and
    # confirm with "OK".  Any NoSuchElementException is swallowed.
    try :
        if(not checkWindow(driver)) :
            element = seekForNearestSeekBar(driver, str)
        else :
            element = driver.find_element_by_class_name("android.widget.SeekBar")
        if (None != element):
            settingSeekBar(driver, element, value)
            driver.find_element_by_android_uiautomator("new UiSelector().text(\"OK\")").click()
    except NoSuchElementException:
        time.sleep(1)
def seekForNearestSeekBar(driver, str):
    # Return the SeekBar that shares a single-LinearLayout row with the
    # selector, or None (implicitly) when no such row exists.
    parents = driver.find_elements_by_class_name("android.widget.LinearLayout")
    for parent in parents:
        try :
            parent.find_element_by_android_uiautomator(str)
            lists = parent.find_elements_by_class_name("android.widget.LinearLayout")
            if len(lists) == 1 :
                innere = parent.find_element_by_class_name("android.widget.SeekBar")
                return innere
                break  # NOTE: unreachable (follows return); kept verbatim
        except NoSuchElementException:
            continue
def settingSeekBar(driver, element, value):
    """Drag a SeekBar to `value`, a fraction in 0..1 of its width.

    Presses 10 px in from the bar's left edge and releases at
    x + width * value, vertically centered on the bar.
    """
    x = element.rect.get("x")
    y = element.rect.get("y")
    width = element.rect.get("width")
    height = element.rect.get("height")
    TouchAction(driver).press(None, x + 10, y + height / 2).move_to(
        None, x + width * value, y + height / 2
    ).release().perform()
    # Removed dead trailing statement `y = value` (rebound a local just
    # before the function returned; it had no effect).
def clickInMultiList(driver, str) :
    # Multi-select list dialog: check the target entry (the last
    # CheckedTextView when no selector is given) if it is not already
    # checked, then confirm with "OK" when a dialog is open.
    element = None
    if (str is None) :
        candidates = driver.find_elements_by_class_name("android.widget.CheckedTextView")
        if len(candidates) >= 1 and checkWindow(driver):
            element = candidates[len(candidates)-1]
    else :
        element = scrollToFindElement(driver, str)
    if element is not None :
        nowvalue = element.get_attribute("checked")
        if (nowvalue != "true") :
            element.click()
        if checkWindow(driver) :
            driver.find_element_by_android_uiautomator("new UiSelector().text(\"OK\")").click()
# testcase7_006
# Python 2 driver script: replays a recorded UI interaction against the
# tapandturn app, then dumps a JaCoCo coverage snapshot named "7_006".
try :
    starttime = time.time()
    driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
    # Tap the rotation-delay seekbar, pick "60" from the resulting list,
    # tap the seekbar again, then press MENU (82) and BACK (4).
    element = getElememt(driver, "new UiSelector().resourceId(\"com.gabm.tapandturn:id/seekbar\").className(\"android.widget.SeekBar\")")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"60\")", "new UiSelector().className(\"android.widget.TextView\").instance(7)")
    TouchAction(driver).tap(element).perform()
    element = getElememt(driver, "new UiSelector().resourceId(\"com.gabm.tapandturn:id/seekbar\").className(\"android.widget.SeekBar\")")
    TouchAction(driver).tap(element).perform()
    driver.press_keycode(82)
    driver.press_keycode(4)
except Exception, e:
    print 'FAIL'
    print 'str(e):\t\t', str(e)
    print 'repr(e):\t', repr(e)
    print traceback.format_exc()
else:
    print 'OK'
finally:
    # Always collect coverage and shut the session down, pass or fail.
    cpackage = driver.current_package
    endtime = time.time()
    print 'consumed time:', str(endtime - starttime), 's'
    command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"7_006\"")
    jacocotime = time.time()
    print 'jacoco time:', str(jacocotime - endtime), 's'
    driver.quit()
# If the run ended in a different app, force-stop it to clean up.
if (cpackage != 'com.gabm.tapandturn'):
    cpackage = "adb shell am force-stop " + cpackage
    os.popen(cpackage)
"prefest2018@gmail.com"
] | prefest2018@gmail.com |
d31f9f3dea79d590895bdcac0b013ca328a820d6 | 3a28b1a12d0710c06f6360381ad8be6cf3707907 | /modular_model/triHPC/triHPCThermo/HPCAllTrays37CstmLiqEtlp_px_N2.py | f39a795e60701a5238d3eca02648ee7c6712b6ee | [] | no_license | WheatZhang/DynamicModelling | 6ce1d71d3b55176fd4d77a6aedbaf87e25ce4d02 | ea099245135fe73e8c9590502b9c8b87768cb165 | refs/heads/master | 2020-06-15T14:12:50.373047 | 2019-07-05T01:37:06 | 2019-07-05T01:37:06 | 195,319,788 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | def LiqEtlp_px_N2(P,T,x_N2):
x = (P-5.41573658e+02)/2.47804900e-01
y = (T--1.78069279e+02)/7.24480000e-03
z = (x_N2-9.96540601e-01)/9.95332218e-04
output = \
1*-8.06034533e+03
liq_etlp = output*1.00000000e+00+0.00000000e+00
return liq_etlp | [
"1052632241@qq.com"
] | 1052632241@qq.com |
665f285f713fce0f55dc8b14e94c2d734e1b98b8 | b891f38eb12eeafdbcec9deee2320acfaac3a7ad | /0x01-python-if_else_loops_functions/7-islower.py | 92a7fd93a6f4dffa58f2d130a83a4dfb40bb188e | [] | no_license | davixcky/holbertonschool-higher_level_programming | bb112af3e18994a46584ac3e78385e46c3d918f6 | fe4cd0e95ee976b93bd47c85c2bc810049f568fa | refs/heads/master | 2023-01-11T00:41:03.145968 | 2020-09-22T22:55:53 | 2020-09-22T22:55:53 | 259,390,611 | 0 | 5 | null | null | null | null | UTF-8 | Python | false | false | 94 | py | #!/usr/bin/python3
def islower(c):
    """Return True if the character `c` is an ASCII lowercase letter."""
    # 97 and 122 are the code points of 'a' and 'z'.
    return 97 <= ord(c) <= 122
| [
"dvdizcky@gmail.com"
] | dvdizcky@gmail.com |
d0fafe1c03d3be10fa89d812b4598501ee240c1a | 1065ec75d9ee668ffd7aafc6a8de912d7c2cee6f | /addons/script.icechannel.extn.extra.uk/plugins/livetv_uk/islam_channel_ltvi.py | 45d34e60e67ec4c94ae5ac0736e202cbe32e6204 | [] | no_license | bopopescu/kodiprofile | 64c067ee766e8a40e5c148b8e8ea367b4879ffc7 | 7e78640a569a7f212a771aab6a4a4d9cb0eecfbe | refs/heads/master | 2021-06-11T17:16:15.498281 | 2016-04-03T06:37:30 | 2016-04-03T06:37:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,038 | py | '''
Ice Channel
'''
from entertainment.plugnplay.interfaces import LiveTVIndexer
from entertainment.plugnplay import Plugin
from entertainment import common
class islam_channel(LiveTVIndexer):
    # Ice Channel live-TV plugin descriptor for the "Islam Channel".
    implements = [LiveTVIndexer]

    display_name = "Islam Channel"
    name = "islam_channel"
    other_names = "islam_channel,Islam Channel"

    # Class-body imports: resolve the addon's install path for the logo.
    import xbmcaddon
    import os
    addon_id = 'script.icechannel.extn.extra.uk'
    addon = xbmcaddon.Addon(addon_id)
    # Channel logo shipped with the addon: resources/images/islam_channel.png
    img = os.path.join( addon.getAddonInfo('path'), 'resources', 'images', name + '.png' )

    regions = [
        {
            'name':'United Kingdom',
            'img':addon.getAddonInfo('icon'),
            'fanart':addon.getAddonInfo('fanart')
        },
    ]
    languages = [
        {'name':'English', 'img':'', 'fanart':''},
    ]
    genres = [
        {'name':'International', 'img':'', 'fanart':''}
    ]

    # Drop the addon handle once class-body setup is complete.
    addon = None
| [
"sokasoka@hotmail.com"
] | sokasoka@hotmail.com |
e73fbfaaa91a9301ec2a18d4f2a6130034fe5553 | d5b48163d236ca770be8e687f92192e2971397e8 | /116.py | d7870f37e24c92963d66021b29540135312aafc5 | [] | no_license | Kunal352000/python_program | 191f5d9c82980eb706e11457c2b5af54b0d2ae95 | 7a1c645f9eab87cc45a593955dcb61b35e2ce434 | refs/heads/main | 2023-07-12T19:06:19.121741 | 2021-08-21T11:58:41 | 2021-08-21T11:58:41 | 376,606,535 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 181 | py | num=int(input("Enter a number: "))
for i in range(num):
for j in range(num-1-i):
print(" ",end="")
for j in range(num):
print("*",end="")
print()
| [
"noreply@github.com"
] | Kunal352000.noreply@github.com |
dec76e7957ea062e6fdd0bc4d7e16cd9a404bade | 992d0d5e06813f6dff323e4b528cd39b4cbaa955 | /pytorch入门与实践/Fast-Neural-Style/utils.py | e07c2f6fef99b3c70c5e359c84229fa87699be98 | [] | no_license | happy-luck/pytorch-study | 9997d6b92785df4d6b4b0eb8c8f8ab9ee15bfc2b | 137a7eb0d76ad2bc3e731aade2bfda4586e7d21a | refs/heads/master | 2022-11-17T16:34:02.974645 | 2020-07-16T01:32:33 | 2020-07-16T01:32:33 | 279,233,757 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,629 | py | # coding:utf8
from itertools import chain
import visdom
import torch as t
import time
import torchvision as tv
import numpy as np
IMAGENET_MEAN = [0.485, 0.456, 0.406]
IMAGENET_STD = [0.229, 0.224, 0.225]
def gram_matrix(y):
    '''
    Batched Gram matrix.
    Input:  y of shape (b, c, h, w)
    Output: (b, c, c), normalized by c * h * w
    '''
    batch, channels, height, width = y.size()
    # Flatten each feature map, then G[i] = F[i] @ F[i]^T per sample.
    flat = y.view(batch, channels, height * width)
    return flat.bmm(flat.transpose(1, 2)) / (channels * height * width)
class Visualizer():
    '''
    Wraps basic visdom operations; the raw visdom interface is still
    reachable through `self.vis.function` (see __getattr__).
    '''

    def __init__(self, env='default', **kwargs):
        import visdom
        self.vis = visdom.Visdom(env=env, **kwargs)

        # Per-plot x-coordinate counter:
        # e.g. ('loss', 23) means the 23rd point of the 'loss' curve.
        self.index = {}
        self.log_text = ''

    def reinit(self, env='default', **kwargs):
        '''
        Reconfigure visdom (new env / connection options).
        '''
        self.vis = visdom.Visdom(env=env, **kwargs)
        return self

    def plot_many(self, d):
        '''
        Plot several scalars at once.
        @params d: dict (name, value) i.e. ('loss', 0.11)
        '''
        for k, v in d.items():
            self.plot(k, v)

    def img_many(self, d):
        # Show several images at once; d maps window name -> tensor.
        for k, v in d.items():
            self.img(k, v)

    def plot(self, name, y):
        '''
        Append one scalar to the named curve, e.g. self.plot('loss', 1.00).
        '''
        x = self.index.get(name, 0)
        # First point creates the window; later points append to it.
        self.vis.line(Y=np.array([y]), X=np.array([x]),
                      win=name,
                      opts=dict(title=name),
                      update=None if x == 0 else 'append'
                      )
        self.index[name] = x + 1

    def img(self, name, img_):
        '''
        Show a tensor as an image, e.g. self.img('input_img', t.Tensor(64, 64)).
        '''
        if len(img_.size()) < 3:
            # visdom expects CHW; add a channel dim for 2-D inputs.
            img_ = img_.cpu().unsqueeze(0)
        self.vis.image(img_.cpu(),
                       win=name,
                       opts=dict(title=name)
                       )

    def img_grid_many(self, d):
        for k, v in d.items():
            self.img_grid(k, v)

    def img_grid(self, name, input_3d):
        '''
        Turn a batch of images into one grid image, i.e. an input of shape
        (36, 64, 64) becomes a 6x6 grid where each cell is 64x64.
        '''
        # Values are clamped into [0, 1] before rendering.
        self.img(name, tv.utils.make_grid(
            input_3d.cpu()[0].unsqueeze(1).clamp(max=1, min=0)))

    def log(self, info, win='log_text'):
        '''
        Append a timestamped line to the visdom text window, e.g.
        self.log({'loss': 1, 'lr': 0.0001}).
        '''
        self.log_text += ('[{time}] {info} <br>'.format(
            time=time.strftime('%m%d_%H%M%S'), \
            info=info))
        self.vis.text(self.log_text, win='log_text')

    def __getattr__(self, name):
        # Fall back to the underlying visdom client for anything not
        # defined on this wrapper.
        return getattr(self.vis, name)
def get_style_data(path):
    '''
    Load the style image.
    Input:  path, file path
    Return: shape 1*c*h*w, values roughly in -2~2 (ImageNet-normalized)
    '''
    style_transform = tv.transforms.Compose([
        tv.transforms.ToTensor(),
        tv.transforms.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD),
    ])

    style_image = tv.datasets.folder.default_loader(path)
    style_tensor = style_transform(style_image)
    # Add the batch dimension.
    return style_tensor.unsqueeze(0)
def normalize_batch(batch):
    """Normalize an image batch.

    Input:  b,ch,h,w with values in 0~255
    Output: b,ch,h,w with values roughly in -2~2
    """
    # Broadcastable per-channel statistics, expanded to the batch shape and
    # wrapped as (legacy) autograd Variables.
    stat_shape = (1, -1, 1, 1)
    mean = t.autograd.Variable(
        batch.data.new(IMAGENET_MEAN).view(*stat_shape).expand_as(batch.data))
    std = t.autograd.Variable(
        batch.data.new(IMAGENET_STD).view(*stat_shape).expand_as(batch.data))
    return (batch / 255.0 - mean) / std
| [
"18813129242@163.com"
] | 18813129242@163.com |
27b844b352de333d17ec109d4f30f57512010ac0 | 8d2a124753905fb0455f624b7c76792c32fac070 | /pytnon-month01/month01-shibw-notes/day10-shibw/exercise01-定义类.py | abe7abecd735db16f3f2a77f7971105114d5bfaa | [] | no_license | Jeremy277/exercise | f38e4f19aae074c804d265f6a1c49709fd2cae15 | a72dd82eb2424e4ae18e2f3e9cc66fc4762ec8fa | refs/heads/master | 2020-07-27T09:14:00.286145 | 2019-09-17T11:31:44 | 2019-09-17T11:31:44 | 209,041,629 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 931 | py | #定义 Dog类
#Dog中的数据有 name kinds color
#Dog的行为有
# eat 打印 狗吃xx
# run 打印 狗正在以xxkm/h的速度飞奔
class Dog:
    # Exercise class: a dog with a name, breed (kinds) and color, plus two
    # behaviours (eat, run) that print a description.
    def __init__(self,name,kinds,color):
        self.name = name
        self.kinds = kinds
        self.color = color
    def eat(self,food):
        # Prints (in Chinese): "<name> is eating <food>".
        print('%s正在吃%s' % (self.name,food))
    def run(self,speed):
        # Prints (in Chinese): "the <color> <breed> is running at <speed> km/h".
        print('%s的%s正在以%skm/h的速度飞奔' %(self.color,self.kinds,speed))
# Create a Dog object (this invokes __init__).
wangcai = Dog('旺财','中华田园犬','黄色')
wangcai.eat('骨头')
wangcai.run(40)
# Assign the same Dog object to doudou: both names now refer to one object.
doudou = wangcai
# doudou.eat('狗粮')#
# wangcai.eat('火腿肠')
doudou.name = '豆豆'
wangcai.eat('排骨')# prints the NEW name 豆豆: both variables share the object
# Lists hold references too: list01 and list02 are the same list object, so
# mutating an element through one name is visible through the other.
list01 = [wangcai,doudou,Dog('儿子','哈士奇','灰色')]
list02 = list01
list01[2].color = '白色'
print(list02[2].color)# prints 白色 (the mutated color)
| [
"13572093824@163.com"
] | 13572093824@163.com |
ef62508ce0309f54e17c2f9c0bc851c8a77efc7b | 72ce57d187fb6a4730f1390e280b939ef8087f5d | /nuitka/codegen/TryCodes.py | 3aebcda27ebb9915c961fe9c176fade07830c1ea | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | tommyli3318/Nuitka | c5b7681b73d96cb8859210ed1a78f09149a23825 | ae52b56024d53159a72a5acbfaac792ca207c418 | refs/heads/develop | 2020-05-02T17:02:10.578065 | 2019-10-27T15:53:32 | 2019-10-27T15:53:32 | 178,086,582 | 1 | 0 | Apache-2.0 | 2019-06-06T00:32:48 | 2019-03-27T22:53:31 | Python | UTF-8 | Python | false | false | 11,134 | py | # Copyright 2019, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Try statement and related code generation.
For Nuitka, all try/except and try/finally are dealt with this, where the
finally block gets duplicated into handlers. So this is a common low level
structure used, where exception handling and everything is made explicit.
"""
from nuitka import Options
from .CodeHelpers import generateExpressionCode, generateStatementSequenceCode
from .ErrorCodes import getMustNotGetHereCode
from .ExceptionCodes import getExceptionUnpublishedReleaseCode
from .IteratorCodes import getBuiltinLoopBreakNextCode
from .LabelCodes import getGotoCode, getLabelCode
from .VariableCodes import getVariableAssignmentCode
def generateTryCode(statement, emit, context):
    """Emit C code for Nuitka's unified try construct.

    Nuitka lowers every try/except and try/finally to one node with up to
    four handlers (exception, break, continue, return). This generates the
    tried block and each present handler, wiring goto labels for every
    possible escape out of the tried code.
    """
    # The try construct is the most complex for code generation. We may need to
    # react on break, continue, return, raise in the handlers. For exception
    # and return handlers, we need to be able to re-raise or re-return.
    # So this is full of detail stuff, pylint: disable=too-many-branches,too-many-locals,too-many-statements
    # Fast path: the "tmp = next(it)" / StopIteration-break pattern has
    # dedicated code generation; nothing more to do when it applied.
    if generateTryNextExceptStopIterationCode(statement, emit, context):
        return
    # Get the statement sequences involved. All except the tried block can be
    # None. For the tried block it would be a missed optimization. Also not all
    # the handlers must be None, then it's also a missed optimization.
    tried_block = statement.getBlockTry()
    except_handler = statement.getBlockExceptHandler()
    continue_handler = statement.getBlockContinueHandler()
    break_handler = statement.getBlockBreakHandler()
    return_handler = statement.getBlockReturnHandler()
    tried_block_may_raise = tried_block.mayRaiseException(BaseException)
    assert (
        tried_block_may_raise
        or continue_handler is not None
        or break_handler is not None
        or return_handler is not None
    ), statement.asXmlText()
    # The tried statements might raise, for which we define an escape.
    tried_handler_escape = context.allocateLabel("try_except_handler")
    if tried_block_may_raise:
        old_exception_escape = context.setExceptionEscape(tried_handler_escape)
    # The tried statements might continue, for which we define an escape.
    continue_handler_escape = context.allocateLabel("try_continue_handler")
    if continue_handler is not None:
        old_continue_target = context.setLoopContinueTarget(continue_handler_escape)
    # The tried statements might break, for which we define an escape.
    break_handler_escape = context.allocateLabel("try_break_handler")
    if break_handler is not None:
        old_break_target = context.setLoopBreakTarget(break_handler_escape)
    # The tried statements might return, for which we define an escape.
    return_handler_escape = context.allocateLabel("try_return_handler")
    if return_handler is not None:
        old_return_target = context.setReturnTarget(return_handler_escape)
    # Now the tried block can be generated, cannot be "None" or else the
    # optimization failed.
    emit("// Tried code:")
    generateStatementSequenceCode(
        statement_sequence=tried_block, emit=emit, allow_none=False, context=context
    )
    # Restore the old escape targets as preserved above, during the handlers,
    # the parent handlers should be back in effect.
    if tried_block_may_raise:
        context.setExceptionEscape(old_exception_escape)
    if continue_handler:
        context.setLoopContinueTarget(old_continue_target)
    if break_handler:
        context.setLoopBreakTarget(old_break_target)
    if return_handler:
        context.setReturnTarget(old_return_target)
    # post_label is lazily allocated: only needed when some path falls
    # through to the code after the try construct.
    post_label = None
    if not tried_block.isStatementAborting():
        if post_label is None:
            post_label = context.allocateLabel("try_end")
        getGotoCode(post_label, emit)
    else:
        getMustNotGetHereCode(
            reason="tried codes exits in all cases", context=context, emit=emit
        )
    if return_handler is not None:
        assert tried_block.mayReturn()
        emit("// Return handler code:")
        getLabelCode(return_handler_escape, emit)
        # During the return value, the value being returned is in a variable,
        # and therefore needs to be released before being updated.
        old_return_value_release = context.setReturnReleaseMode(True)
        generateStatementSequenceCode(
            statement_sequence=return_handler,
            emit=emit,
            allow_none=False,
            context=context,
        )
        context.setReturnReleaseMode(old_return_value_release)
        assert return_handler.isStatementAborting()
    if tried_block_may_raise:
        emit("// Exception handler code:")
        getLabelCode(tried_handler_escape, emit)
        # Need to preserve exception state.
        keeper_type, keeper_value, keeper_tb, keeper_lineno = (
            context.allocateExceptionKeeperVariables()
        )
        old_keepers = context.setExceptionKeeperVariables(
            (keeper_type, keeper_value, keeper_tb, keeper_lineno)
        )
        assert keeper_type is not None
        exception_type, exception_value, exception_tb, exception_lineno = (
            context.variable_storage.getExceptionVariableDescriptions()
        )
        # TODO: That normalization and chaining is only necessary if the
        # exception is published.
        # Move the live exception into the keeper variables and clear the
        # current-exception slots before running the handler.
        emit(
            """\
%(keeper_type)s = %(exception_type)s;
%(keeper_value)s = %(exception_value)s;
%(keeper_tb)s = %(exception_tb)s;
%(keeper_lineno)s = %(exception_lineno)s;
%(exception_type)s = NULL;
%(exception_value)s = NULL;
%(exception_tb)s = NULL;
%(exception_lineno)s = 0;
"""
            % {
                "keeper_type": keeper_type,
                "keeper_value": keeper_value,
                "keeper_tb": keeper_tb,
                "keeper_lineno": keeper_lineno,
                "exception_type": exception_type,
                "exception_value": exception_value,
                "exception_tb": exception_tb,
                "exception_lineno": exception_lineno,
            }
        )
        generateStatementSequenceCode(
            statement_sequence=except_handler,
            emit=emit,
            allow_none=True,
            context=context,
        )
        if except_handler is None or not except_handler.isStatementAborting():
            getExceptionUnpublishedReleaseCode(emit, context)
            if post_label is None:
                post_label = context.allocateLabel("try_end")
            getGotoCode(post_label, emit)
        getMustNotGetHereCode(
            reason="exception handler codes exits in all cases",
            context=context,
            emit=emit,
        )
        context.setExceptionKeeperVariables(old_keepers)
    else:
        assert except_handler is None
    if break_handler is not None:
        assert tried_block.mayBreak()
        emit("// try break handler code:")
        getLabelCode(break_handler_escape, emit)
        generateStatementSequenceCode(
            statement_sequence=break_handler,
            emit=emit,
            allow_none=False,
            context=context,
        )
        assert break_handler.isStatementAborting()
    if continue_handler is not None:
        assert tried_block.mayContinue()
        emit("// try continue handler code:")
        getLabelCode(continue_handler_escape, emit)
        generateStatementSequenceCode(
            statement_sequence=continue_handler,
            emit=emit,
            allow_none=False,
            context=context,
        )
        assert continue_handler.isStatementAborting()
    emit("// End of try:")
    if post_label is not None:
        getLabelCode(post_label, emit)
def generateTryNextExceptStopIterationCode(statement, emit, context):
    """Special-case code generation for the common for-loop iteration pattern.

    Detects a try whose tried block is exactly ``tmp = next(it)`` and whose
    except handler is a conditional that breaks the loop in one branch and
    re-raises in the other, and emits the dedicated loop-break-next code for
    it. Returns True when the pattern matched and code was emitted, False to
    fall back to the generic try code generation.
    """
    # This has many branches which mean this optimized code generation is not
    # applicable, we return each time. pylint: disable=too-many-branches,too-many-return-statements
    except_handler = statement.getBlockExceptHandler()
    if except_handler is None:
        return False
    if statement.getBlockBreakHandler() is not None:
        return False
    if statement.getBlockContinueHandler() is not None:
        return False
    if statement.getBlockReturnHandler() is not None:
        return False
    # The tried block must be a single "variable = next(iterator)" statement.
    tried_statements = statement.getBlockTry().getStatements()
    if len(tried_statements) != 1:
        return False
    handling_statements = except_handler.getStatements()
    if len(handling_statements) != 1:
        return False
    tried_statement = tried_statements[0]
    if not tried_statement.isStatementAssignmentVariable():
        return False
    assign_source = tried_statement.getAssignSource()
    if not assign_source.isExpressionBuiltinNext1():
        return False
    # The handler must be "if <cond>: break / else: raise".
    handling_statement = handling_statements[0]
    if not handling_statement.isStatementConditional():
        return False
    yes_statements = handling_statement.getBranchYes().getStatements()
    no_statements = handling_statement.getBranchNo().getStatements()
    if len(yes_statements) != 1:
        return False
    if not yes_statements[0].isStatementLoopBreak():
        return False
    if len(no_statements) != 1:
        return False
    if not no_statements[0].isStatementReraiseException():
        return False
    # Pattern matched: evaluate the iterator expression, then emit the
    # combined "next or break loop" code and assign the result.
    tmp_name = context.allocateTempName("next_source")
    generateExpressionCode(
        expression=assign_source.getValue(),
        to_name=tmp_name,
        emit=emit,
        context=context,
    )
    tmp_name2 = context.allocateTempName("assign_source")
    old_source_ref = context.setCurrentSourceCodeReference(
        assign_source.getSourceReference()
        if Options.isFullCompat()
        else statement.getSourceReference()
    )
    getBuiltinLoopBreakNextCode(
        to_name=tmp_name2, value=tmp_name, emit=emit, context=context
    )
    getVariableAssignmentCode(
        tmp_name=tmp_name2,
        variable=tried_statement.getVariable(),
        variable_trace=tried_statement.getVariableTrace(),
        needs_release=None,
        in_place=False,
        emit=emit,
        context=context,
    )
    context.setCurrentSourceCodeReference(old_source_ref)
    if context.needsCleanup(tmp_name2):
        context.removeCleanupTempName(tmp_name2)
    return True
| [
"kay.hayen@gmail.com"
] | kay.hayen@gmail.com |
02ce301afe155e8d1301e152c047e372786ae63a | 43852c47c9bf8f1d7d54f564a7130bb667df5110 | /python/ray/air/util/check_ingest.py | 79ea645049007a3275126cf71523096461375f9d | [
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] | permissive | jon-chuang/ray | 654de3518b77a9b4285ef402053a3db3e341ec94 | adf2b92afcb4688251388838210e18b8721871b9 | refs/heads/master | 2023-05-26T09:04:24.349126 | 2023-02-20T02:33:40 | 2023-02-20T02:33:40 | 206,287,690 | 1 | 1 | Apache-2.0 | 2023-05-20T08:02:24 | 2019-09-04T09:55:00 | Python | UTF-8 | Python | false | false | 7,803 | py | #!/usr/bin/env python
import sys
import time
from typing import Optional
import numpy as np
import ray
from ray.air import session
from ray.air.config import DatasetConfig, ScalingConfig
from ray.data import Dataset, DatasetIterator, Preprocessor
from ray.data.preprocessors import BatchMapper, Chain
from ray.train._internal.dataset_spec import DataParallelIngestSpec
from ray.train.data_parallel_trainer import DataParallelTrainer
from ray.util.annotations import DeveloperAPI
@DeveloperAPI
class DummyTrainer(DataParallelTrainer):
    """A Trainer that does nothing except read the data for a given number of epochs.
    It prints out as much debugging statistics as possible.
    This is useful for debugging data ingest problems. This trainer supports normal
    scaling options same as any other Trainer (e.g., num_workers, use_gpu).
    """
    def __init__(
        self,
        *args,
        scaling_config: Optional[ScalingConfig] = None,
        num_epochs: int = 1,
        prefetch_blocks: int = 1,
        batch_size: Optional[int] = 4096,
        **kwargs
    ):
        # Default to a single training worker when no scaling config is given.
        if not scaling_config:
            scaling_config = ScalingConfig(num_workers=1)
        super().__init__(
            train_loop_per_worker=DummyTrainer.make_train_loop(
                num_epochs, prefetch_blocks, batch_size
            ),
            *args,
            scaling_config=scaling_config,
            **kwargs
        )
    def preprocess_datasets(self):
        # Time the parent preprocessing step and print preprocessor stats.
        print("Starting dataset preprocessing")
        start = time.perf_counter()
        super().preprocess_datasets()
        print("Preprocessed datasets in", time.perf_counter() - start, "seconds")
        if self.preprocessor:
            print("Preprocessor", self.preprocessor)
            print(
                "Preprocessor transform stats:\n\n{}".format(
                    self.preprocessor.transform_stats()
                )
            )
    @staticmethod
    def make_train_loop(
        num_epochs: int, prefetch_blocks: int, batch_size: Optional[int]
    ):
        """Make a debug train loop that runs for the given amount of epochs."""
        def train_loop_per_worker():
            import pandas as pd
            rank = session.get_world_rank()
            data_shard = session.get_dataset_shard("train")
            start = time.perf_counter()
            epochs_read, batches_read, bytes_read = 0, 0, 0
            batch_delays = []
            print("Starting train loop on worker", rank)
            for epoch in range(num_epochs):
                epochs_read += 1
                batch_start = time.perf_counter()
                for batch in data_shard.iter_batches(
                    prefetch_blocks=prefetch_blocks, batch_size=batch_size
                ):
                    # How long we waited for this batch to arrive.
                    batch_delay = time.perf_counter() - batch_start
                    batch_delays.append(batch_delay)
                    batches_read += 1
                    # Estimate the batch's size in bytes by its type.
                    if isinstance(batch, pd.DataFrame):
                        bytes_read += int(
                            batch.memory_usage(index=True, deep=True).sum()
                        )
                    elif isinstance(batch, np.ndarray):
                        bytes_read += batch.nbytes
                    else:
                        # NOTE: This isn't recursive and will just return the size of
                        # the object pointers if list of non-primitive types.
                        bytes_read += sys.getsizeof(batch)
                    session.report(
                        dict(
                            bytes_read=bytes_read,
                            batches_read=batches_read,
                            epochs_read=epochs_read,
                            batch_delay=batch_delay,
                        )
                    )
                    batch_start = time.perf_counter()
            delta = time.perf_counter() - start
            # Summary statistics for the whole read.
            print("Time to read all data", delta, "seconds")
            print(
                "P50/P95/Max batch delay (s)",
                np.quantile(batch_delays, 0.5),
                np.quantile(batch_delays, 0.95),
                np.max(batch_delays),
            )
            print("Num epochs read", epochs_read)
            print("Num batches read", batches_read)
            print("Num bytes read", round(bytes_read / (1024 * 1024), 2), "MiB")
            print(
                "Mean throughput", round(bytes_read / (1024 * 1024) / delta, 2), "MiB/s"
            )
            if rank == 0:
                print("Ingest stats from rank=0:\n\n{}".format(data_shard.stats()))
        return train_loop_per_worker
@DeveloperAPI
def make_local_dataset_iterator(
    dataset: Dataset,
    preprocessor: Preprocessor,
    dataset_config: DatasetConfig,
) -> DatasetIterator:
    """A helper function to create a local
    :py:class:`DatasetIterator <ray.data.DatasetIterator>`,
    like the one returned by :meth:`~ray.air.session.get_dataset_shard`.
    This function should only be used for development and debugging. It will
    raise an exception if called by a worker instead of the driver.
    Args:
        dataset: The input Dataset.
        preprocessor: The preprocessor that will be applied to the input dataset.
        dataset_config: The dataset config normally passed to the trainer.
    """
    # Refuse to run inside a Ray worker: this helper is driver-only.
    runtime_context = ray.runtime_context.get_runtime_context()
    if runtime_context.worker.mode == ray._private.worker.WORKER_MODE:
        raise RuntimeError(
            "make_local_dataset_iterator should only be used by the driver "
            "for development and debugging. To consume a dataset from a "
            "worker or AIR trainer, see "
            "https://docs.ray.io/en/latest/ray-air/check-ingest.html."
        )
    dataset_config = dataset_config.fill_defaults()
    # Reuse the data-parallel ingest machinery with a single placeholder
    # worker handle, then return that worker's "train" shard.
    spec = DataParallelIngestSpec({"train": dataset_config})
    spec.preprocess_datasets(preprocessor, {"train": dataset})
    training_worker_handles = [None]
    it = spec.get_dataset_shards(training_worker_handles)[0]["train"]
    return it
if __name__ == "__main__":
    import argparse
    # CLI flags controlling how much data the dummy trainer reads.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--num-epochs", "-e", type=int, default=1, help="Number of epochs to read."
    )
    parser.add_argument(
        "--prefetch-blocks",
        "-b",
        type=int,
        default=1,
        help="Number of blocks to prefetch when reading data.",
    )
    args = parser.parse_args()
    # Generate a synthetic dataset of ~10GiB of float64 data. The dataset is sharded
    # into 100 blocks (parallelism=100).
    dataset = ray.data.range_tensor(50000, shape=(80, 80, 4), parallelism=100)
    # An example preprocessor chain that just scales all values by 4.0 in two stages.
    preprocessor = Chain(
        BatchMapper(lambda df: df * 2, batch_format="pandas"),
        BatchMapper(lambda df: df * 2, batch_format="pandas"),
    )
    # Setup the dummy trainer that prints ingest stats.
    # Run and print ingest stats.
    trainer = DummyTrainer(
        scaling_config=ScalingConfig(num_workers=1, use_gpu=False),
        datasets={"train": dataset},
        preprocessor=preprocessor,
        num_epochs=args.num_epochs,
        prefetch_blocks=args.prefetch_blocks,
        dataset_config={"train": DatasetConfig()},
        batch_size=None,
    )
    print("Dataset config", trainer.get_dataset_config())
    trainer.fit()
    # Print memory stats (you can also use "ray memory --stats-only" to monitor
    # this during the middle of the run).
    try:
        print(
            "Memory stats at end of ingest:\n\n{}".format(
                ray._private.internal_api.memory_summary(stats_only=True)
            )
        )
    except Exception:
        print("Error getting Ray memory stats")
| [
"noreply@github.com"
] | jon-chuang.noreply@github.com |
b73a6050107b0d880b9466a83b8d551cad4b616e | 1cca2891740d5ed6925f1ab0b1ade7ff814ff504 | /vcenter/migrations/0013_auto_20161108_1507.py | f4ad2c4c4c8dfb12d7d7e376945182a65457de56 | [] | no_license | sj741231/stockstar-vsa | bac5dd747e3ccfd4c36067b79ae30b1e88dc4597 | f5877567b6d7a0e3ab9895416ea95d02f3b572a4 | refs/heads/master | 2021-01-24T08:32:42.055321 | 2017-06-05T14:57:49 | 2017-06-12T03:15:55 | 93,385,857 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 444 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-11-08 07:07
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated schema change: drop the Racks model. Its 'idc' field
    # is removed first, then the model itself is deleted; the order of
    # the operations list is significant for the migration executor.
    dependencies = [
        ('vcenter', '0012_racks'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='racks',
            name='idc',
        ),
        migrations.DeleteModel(
            name='Racks',
        ),
    ]
| [
"shi.jin@126.com"
] | shi.jin@126.com |
5a921dd7bcd488ff6820ab9bfe93341267a5e720 | 6045075c734d65a3cec63d3ae15f8f9f13836559 | /solutions/0331_Verify_Preorder_Serialization_of_a_Binary_Tree/iter_by_degrees.py | e65ad1f91954b9fd44643c2c6c773ed853e9edd4 | [] | no_license | zh-wang/leetcode | c058470fdf84fb950e3d4f974b27826718942d05 | 6322be072e0f75e2da28b209c1dbb31593e5849f | refs/heads/master | 2021-12-28T02:49:11.964213 | 2021-08-25T06:29:21 | 2021-08-25T06:29:21 | 189,919,649 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,114 | py | class Solution:
def isValidSerialization(self, preorder: str) -> bool:
arr = preorder.split(',')
# In a binary tree, if we consider null as leaves, then
# all non-null node provides 2 outdegree and 1 indegree (2 children and 1 parent), except root all null node provides 0 outdegree and 1 indegree (0 child and 1 parent).
# Suppose we try to build this tree. During building, we record the difference between out degree and in degree diff = outdegree - indegree. When the next node comes, we then decrease diff by 1, because the node provides an in degree. If the node is not null, we increase diff by2, because it provides two out degrees. If a serialization is correct, diff should never be negative and diff will be zero when finished.
diff = 1
for v in arr:
# each node provide a indgree
diff -= 1
if diff < 0: # indgree larger than outdgree
return False
if v != '#':
diff += 2 # non-empty node provide two outdgrees
return diff == 0 # indgree must be equal to outdgree
| [
"viennakanon@gmail.com"
] | viennakanon@gmail.com |
fc72d5ac87d23dad96d62f98d314106ccd272a48 | 2b56aaec923a2e7939734c6743c934ad960aef38 | /Greedy/2847.py | b043bd8d61bc02bfaaa4f0e73e87d66a2974d15e | [] | no_license | SAE-HUN/Algorithms | 461757fd5167fed14d7b5aca88fe004a5892c91a | aa2cc4d04e506d762706ae62e93400f1b57c82f0 | refs/heads/master | 2023-05-11T22:42:32.125790 | 2021-06-01T05:21:06 | 2021-06-01T05:21:06 | 306,549,462 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 217 | py | n = int(input())
scores = [int(input()) for _ in range(n)]
answer = 0
for i in range(n-1, 0, -1):
if scores[i-1]>scores[i]-1:
answer += scores[i-1] - (scores[i] - 1)
scores[i-1] = scores[i] - 1
print(answer)
| [
"noreply@github.com"
] | SAE-HUN.noreply@github.com |
d6636ecb60664086132846b70730e5069b161a15 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/1/dab.py | 3542630063ad129ada276066e3c439c1e3a79a77 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
    # Print the quoted payload of a 'dAB' statement. lineRemaining is the
    # whitespace-split tail of the line; the first and last tokens must be
    # the quote character '"' for anything to be printed.
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            # Strip the two quote tokens and print the words in between,
            # joined by single spaces.
            lineRemaining = lineRemaining[1:-1]
            print ' '.join(lineRemaining)
        else:
            # Only the two quote tokens: print an empty line.
            print
def main(fileName):
    # Interpret the program file line by line: each line must start with
    # the keyword 'dAB', whose remaining tokens go to printFunction.
    # Any other first token prints 'ERROR' and stops interpretation.
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if data[0] == 'dAB':
                printFunction(data[1:])
            else:
                print 'ERROR'
                return
# Script entry point: expects the program file as the first CLI argument.
if __name__ == '__main__':
    main(sys.argv[1])
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
1e3886af77eaff89de7f75fc44a5c1a6b51b08c1 | 4e1a6d64b16ae560f2930090201f5f529a42215e | /transmute_core/framework/__init__.py | 63f35abe520b5e0fe5ec5a990bb0809d2212dd41 | [
"MIT"
] | permissive | yunstanford/transmute-core | 7f34ccd6e02491cfe944cae6a04b4bb666455eed | a8e5dd055f0f3d39327d71dd61bf0ee147f59ebe | refs/heads/master | 2019-07-18T07:37:34.850956 | 2018-10-06T23:18:36 | 2018-10-06T23:18:36 | 118,862,219 | 0 | 0 | MIT | 2018-10-06T23:18:37 | 2018-01-25T04:28:52 | JavaScript | UTF-8 | Python | false | false | 44 | py | from .request_adapter import RequestAdapter
| [
"yusuke@tsutsumi.io"
] | yusuke@tsutsumi.io |
c7cc71d2629e6981c09a205d398e0048180f2f04 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02628/s935095687.py | 13c1c31e7df5d777d0ddb3c50f0327140819f7c9 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 174 | py | import itertools
# Read the item count N and selection size K, then the N prices, from stdin.
[N, K] = [int(i) for i in input().split()]
price = [int(i) for i in input().split()]
# The minimum total cost of K items is the sum of the K smallest prices.
price.sort()
sum = 0  # NOTE: name kept because the following print statement uses it
for cost in price[:K]:
    sum += cost
print(sum) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
1ce23d141a5c561f7101d4c1db3f247966d83ba3 | 22cec5da2b1fb83dcc9cf7c888f1e2078b05b62e | /flora/cmds/stop.py | 7c8e12aaf6e9946367cbd633d88b76f9ccb049fb | [
"Apache-2.0"
] | permissive | JuEnPeHa/flora-blockchain | 649d351e096e73222ab79759c71e191e42da5d34 | 656b5346752d43edb89d7f58aaf35b1cacc9a366 | refs/heads/main | 2023-07-18T08:52:51.353754 | 2021-09-07T08:13:35 | 2021-09-07T08:13:35 | 399,297,784 | 0 | 0 | Apache-2.0 | 2021-08-24T01:30:45 | 2021-08-24T01:30:44 | null | UTF-8 | Python | false | false | 1,402 | py | import sys
from pathlib import Path
import click
from flora.util.service_groups import all_groups, services_for_groups
async def async_stop(root_path: Path, group: str, stop_daemon: bool) -> int:
    """Stop the flora services in *group* (or the whole daemon).

    Returns 1 when the daemon cannot be reached or any service failed to
    stop, 0 otherwise.
    """
    from flora.daemon.client import connect_to_daemon_and_validate

    daemon = await connect_to_daemon_and_validate(root_path)
    if daemon is None:
        print("Couldn't connect to flora daemon")
        return 1
    if stop_daemon:
        # Stopping the daemon takes everything down; no per-service work.
        r = await daemon.exit()
        await daemon.close()
        print(f"daemon: {r}")
        return 0
    exit_code = 0
    for service in services_for_groups(group):
        print(f"{service}: ", end="", flush=True)
        running = await daemon.is_running(service_name=service)
        if not running:
            print("Not running")
        else:
            stopped = await daemon.stop_service(service_name=service)
            if stopped:
                print("Stopped")
            else:
                print("Stop failed")
                exit_code = 1
    await daemon.close()
    return exit_code
@click.command("stop", short_help="Stop services")
@click.option("-d", "--daemon", is_flag=True, type=bool, help="Stop daemon")
@click.argument("group", type=click.Choice(all_groups()), nargs=-1, required=True)
@click.pass_context
def stop_cmd(ctx: click.Context, daemon: bool, group: str) -> None:
    # CLI wrapper: run async_stop to completion and use its return value
    # as the process exit code.
    import asyncio
    sys.exit(asyncio.get_event_loop().run_until_complete(async_stop(ctx.obj["root_path"], group, daemon)))
| [
"github@floracoin.farm"
] | github@floracoin.farm |
62b59083ccd6965040babc91013ecbb18ad01d76 | aa8fe9e165df16bd17aa5720b8043c533adde9bb | /source/gui/windows/source_window_disabled.py | aef1201903034c6376f2b28d5af8a51faaa52145 | [] | no_license | teamdiamond/qtlab | 31d2ccaee2ada84a027f2160553f54757e6f6cdf | 67d5bbd58c5f4d4ac3914774b56071d51f121010 | refs/heads/master | 2022-12-14T12:07:35.223055 | 2019-06-25T06:53:57 | 2019-06-25T06:53:57 | 15,255,712 | 0 | 4 | null | 2022-12-07T23:37:45 | 2013-12-17T13:54:53 | Python | UTF-8 | Python | false | false | 11,984 | py | # source_window.py, source edit window for the QT Lab environment
# Reinier Heeres <reinier@heeres.eu>, 2008-2009
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import gtk
import qtclient as qt
import os
import tempfile
import time
from lib.gui import dirtree
try:
import gtksourceview2
_have_gtksourceview = True
except:
_have_gtksourceview = False
import pango
from gettext import gettext as _L
import lib.gui as gui
from lib.gui.qtwindow import QTWindow
def get_python_filter():
    """Build a gtk.FileFilter that matches Python (*.py) files."""
    file_filter = gtk.FileFilter()
    file_filter.set_name(_L('Python files'))
    file_filter.add_pattern('*.py')
    return file_filter
class DirPane(gtk.VBox):
    """Left-hand pane: a root-directory entry above a directory tree."""
    def __init__(self):
        gtk.VBox.__init__(self)
        self.entry = gtk.Entry()
        self.pack_start(gui.pack_hbox(
            [gtk.Label('Root dir'), self.entry],
            False, False), False, False)
        # Pressing Enter in the entry re-roots the directory browser.
        self.entry.connect('activate', self._entry_activated_cb)
        self.dir_browser = dirtree.DirectoryTree('.')
        self.dir_browser.set_size_request(200, -1)
        self.add(self.dir_browser)
    def _entry_activated_cb(self, sender):
        # Open the directory the user typed.
        self.dir_browser.open_dir(sender.get_text())
class SourcePage(gtk.VBox):
    """One editor tab: a gtksourceview2 buffer inside a scrolled window."""
    def __init__(self, filename=None):
        gtk.VBox.__init__(self)
        self.setup_source_view()
        self.add(self._source_win)
        if filename is not None:
            self.load_file(filename)
        self.show_all()
    def setup_source_view(self):
        # Build the buffer/view pair with Python syntax highlighting (when
        # the language definition is available) and line numbers.
        # NOTE(review): gtksourceview2 is used unconditionally here although
        # the module-level import is optional (_have_gtksourceview) -- confirm.
        self._buffer = gtksourceview2.Buffer()
        lang_manager = gtksourceview2.language_manager_get_default()
        if 'python' in lang_manager.get_language_ids():
            lang = lang_manager.get_language('python')
            self._buffer.set_language(lang)
        self._source_view = gtksourceview2.View(self._buffer)
        self._source_view.set_editable(True)
        self._source_view.set_cursor_visible(True)
        self._source_view.set_show_line_numbers(True)
        self._source_view.set_wrap_mode(gtk.WRAP_CHAR)
        self._source_view.modify_font(pango.FontDescription("Monospace 10"))
        self._source_win = gtk.ScrolledWindow()
        self._source_win.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        self._source_win.add(self._source_view)
        # Tag used to highlight find results.
        self._find_tag = self._buffer.create_tag('find')
        self._find_tag.props.background = 'gray'
        self._find_tag.props.foreground = 'yellow'
    def load_file(self, filename):
        # Replace the buffer contents with the file's text.
        f = open(filename)
        data = f.read()
        f.close()
        self._buffer.set_text(data)
class TabLabel(gtk.HBox):
    """Notebook tab label: the filename next to a small close button.
    The button is exposed as self.icon so the owner can hook its 'clicked'
    signal.
    """
    def __init__(self, label):
        gtk.HBox.__init__(self, spacing=5)
        self.pack_start(gtk.Label(label))
        icon = gtk.Image()
        icon.set_from_stock(gtk.STOCK_CLOSE, gtk.ICON_SIZE_MENU)
        self.icon = gtk.Button()
        self.icon.add(icon)
        self.pack_start(self.icon)
        self.show_all()
class SourceWindow(QTWindow):
    def __init__(self):
        QTWindow.__init__(self, 'source', 'Source')
        # Hide instead of destroy when the window is closed.
        self.connect("delete-event", self._delete_event_cb)
        # State for the find / find-next / find-previous commands.
        self._find_string = ''
        self._find_ofs = 0
        # Menu bar description; accelerators are registered below.
        menu = [
            {'name': _L('File'), 'submenu':
                [
                {'name': _L('Open'),
                    'action': self._open_cb, 'accel': '<Control>o'},
                {'name': _L('Close'),
                    'action': self._close_cb, 'accel': '<Control>x'},
                {'name': _L('Save'),
                    'action': self._save_cb, 'accel': '<Control>s'},
                {'name': _L('Save as'), 'action': self._save_as_cb},
                {'name': _L('Run'),
                    'action': self._run_clicked_cb, 'accel': '<Control>r'}
                ]
            },
            {'name': _L('Edit'), 'submenu':
                [
                {'name': _L('Find'),
                    'action': self._find_cb, 'accel': '<Control>f'},
                {'name': _L('Find next'),
                    'action': self._find_next_cb, 'accel': '<Control>n'},
                {'name': _L('Find previous'),
                    'action': self._find_prev_cb, 'accel': '<Control>p'},
                ]
            }
        ]
        self._accel_group = gtk.AccelGroup()
        self.add_accel_group(self._accel_group)
        self._menu = gui.build_menu(menu, accelgroup=self._accel_group)
        # Run menu: a name entry plus a Run button.
        self._name = gtk.Entry()
        self._run_button = gtk.Button(_L('Run'))
        self._run_button.connect('clicked', self._run_clicked_cb)
        self._options = gui.pack_hbox([
            gtk.Label(_L('Name')),
            self._name,
            self._run_button
            ])
        # Directory and edit panes; _file_info maps filename -> tab info.
        self._file_info = {}
        self._notebook = gtk.Notebook()
        self._dir_pane = DirPane()
        self._dir_pane.dir_browser.connect('file-activated',
                self._file_activated_cb)
        self._panes = gtk.HPaned()
        self._panes.add1(self._dir_pane)
        self._panes.add2(self._notebook)
        # Put everything together
        self._vbox = gtk.VBox()
        self._vbox.pack_start(self._menu, False, False)
        self._vbox.pack_start(self._options, False, False)
        self._vbox.pack_start(self._panes, True, True)
        self.add(self._vbox)
        self._vbox.show_all()
    def _delete_event_cb(self, widget, event, data=None):
        # Window close button: hide instead of destroying, so the window
        # can be shown again later.
        self.hide()
        return True
    def _save_cb(self, sender):
        # Menu 'Save': write the buffer to its known filename.
        self.save_file()
    def _save_as_cb(self, sender):
        # Menu 'Save as': ask the user for a target file, then save there.
        chooser = gtk.FileChooserDialog(
            _L('Save as'), None,
            action=gtk.FILE_CHOOSER_ACTION_SAVE,
            buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
                gtk.STOCK_SAVE, gtk.RESPONSE_OK))
        chooser.add_filter(get_python_filter())
        result = chooser.run()
        if result == gtk.RESPONSE_OK:
            filename = chooser.get_filename()
            self.save_file(filename)
        chooser.destroy()
    def _open_cb(self, sender):
        # Menu 'Open': pick a Python file and load it into a new tab.
        chooser = gtk.FileChooserDialog(
            _L('Select file'), None,
            action=gtk.FILE_CHOOSER_ACTION_OPEN,
            buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
                gtk.STOCK_OPEN, gtk.RESPONSE_OK))
        chooser.add_filter(get_python_filter())
        result = chooser.run()
        if result == gtk.RESPONSE_OK:
            filename = chooser.get_filename()
            self.load_file(filename)
        chooser.destroy()
    def _close_cb(self, sender):
        # Menu 'Close': close the currently selected notebook tab.
        curpage = self._notebook.get_current_page()
        page = self._notebook.get_nth_page(curpage)
        self._close_clicked_cb(None, page)
def get_page_filename(self, page):
for filename, info in self._file_info.iteritems():
if info['page'] == page:
return filename
return None
def load_file(self, filename):
if filename in self._file_info:
return
page = SourcePage(filename)
pagenum = self._notebook.append_page(page)
self._notebook.set_current_page(pagenum)
dir, fname = os.path.split(filename)
pagelabel = TabLabel(fname)
pagelabel.icon.connect('clicked', self._close_clicked_cb, page)
self._notebook.set_tab_label(page, pagelabel)
self._file_info[filename] = {
'page': page,
}
def _file_activated_cb(self, sender, filename):
self.load_file(filename)
def _close_clicked_cb(self, sender, page):
filename = self.get_page_filename(page)
del self._file_info[filename]
index = self._notebook.page_num(page)
if index != -1:
self._notebook.remove_page(index)
def save_file(self, filename=None):
if filename is None:
filename = self._filename
if not os.path.exists(filename):
self._filename = filename
f = open(filename, 'w+')
start, end = self._buffer.get_bounds()
f.write(self._buffer.get_text(start, end))
f.close()
else:
print 'File exists already, not overwritten'
def _highlight_result(self, startofs, endofs):
start = self._buffer.get_iter_at_offset(startofs)
end = self._buffer.get_iter_at_offset(endofs)
self._buffer.apply_tag(self._find_tag, start, end)
self._source_view.scroll_to_iter(start, 0.25)
def _prepare_find(self):
start, end = self._buffer.get_bounds()
self._buffer.remove_tag(self._find_tag, start, end)
buftext = self._buffer.get_text(start, end)
return buftext
def _do_find(self, text, backward=False):
buftext = self._prepare_find()
ofs = self._buffer.props.cursor_position
self._find_string = text
if backward:
ofs = buftext.rfind(self._find_string, 0, ofs)
else:
ofs = buftext.find(self._find_string, ofs)
if ofs != -1:
self._highlight_result(ofs, ofs + len(text))
self._find_ofs = ofs
def _do_find_next(self):
if len(self._find_string) == 0:
return
buftext = self._prepare_find()
ofs = buftext.find(self._find_string, self._find_ofs + 1)
if ofs != -1:
self._highlight_result(ofs, ofs + len(self._find_string))
self._find_ofs = ofs
else:
self._find_ofs = 0
def _do_find_prev(self):
if len(self._find_string) == 0:
return
buftext = self._prepare_find()
ofs = buftext.rfind(self._find_string, 0, self._find_ofs - 1)
if ofs != -1:
self._highlight_result(ofs, ofs + len(self._find_string))
self._find_ofs = ofs
else:
self._find_ofs = len(buftext)
def _find_cb(self, sender):
dialog = gtk.Dialog(title=_L('Find'), parent=self,
flags=gtk.DIALOG_MODAL,
buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OK, gtk.RESPONSE_OK))
vbox = dialog.vbox
entry = gtk.Entry()
vbox.pack_start(entry, False, False)
vbox.show_all()
res = dialog.run()
if res == gtk.RESPONSE_OK:
text = entry.get_text()
self._do_find(text)
dialog.destroy()
def _find_next_cb(self, sender):
self._do_find_next()
def _find_prev_cb(self, sender):
self._do_find_prev()
def _run_clicked_cb(self, sender):
fn = os.path.join(tempfile.gettempdir(), '%i.py' % time.time())
f = open(fn, 'w+')
start, end = self._buffer.get_bounds()
f.write(self._buffer.get_text(start, end))
f.close()
qtrun_thread(fn)
# os.remove(fn)
Window = SourceWindow
if __name__ == '__main__':
win = SourceWindow()
gtk.main()
| [
"wolfgangpfff@gmail.com"
] | wolfgangpfff@gmail.com |
dccd735aa9fd375a30bffb83d855082c3471c27f | b03e343677a5fd8b8bd0a616d0ef661f78159674 | /feelings/feelings/settings.py | b61574115b933e2fa181ab810410a70e58cb47f6 | [] | no_license | srikarporeddy/projectfeelings | 5407f6d95d7160f6f894081844255e5237306d6d | 122392b6d58d888e82e049efecca4506424af0b6 | refs/heads/master | 2021-01-23T01:26:21.521541 | 2017-06-02T17:56:22 | 2017-06-02T17:56:22 | 92,872,962 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,316 | py | """
Django settings for feelings project.
Generated by 'django-admin startproject' using Django 1.11.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'sxv_gumxe!k1)9awm0940=%lq)qcid_(#zwk3-7pi5*(w@@)i0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'bootstrap3',
'users',
'thoughts',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'feelings.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'feelings.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'assests'),]
LOGIN_URL = "users:login"
LOGIN_REDIRECT_URL = "users:dashboard"
| [
"srikarporeddy@gmail.com"
] | srikarporeddy@gmail.com |
c5e03d03ec50a0095dff0e4e1b820f5760f7df64 | e121dcc5d23e225891420e730549b9cc7ebe8e88 | /python/lib/direct/test/ModelScreenShot.py | aa59a44301d37bea98c26ab6ca7d651b90d3a4ea | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | PlumpMath/panda3d-3 | 4f4cf7627eddae9b7f30795e0a0657b01fdf670d | 5c0be0e1cd46b422d28d5b81ffb1e8b28c3ac914 | refs/heads/master | 2021-01-25T06:55:36.209044 | 2014-09-29T14:24:53 | 2014-09-29T14:24:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,451 | py | import direct
from panda3d.pandac import loadPrcFileData
from panda3d.direct.showbase.DirectObject import DirectObject
from panda3d.direct.directbase.DirectStart import *
from panda3d.pandac import *
import panda3d.direct.gui.DirectGuiGlobals as DGG
from panda3d.direct.gui.DirectGui import *
from panda3d.direct.task import Task
from panda3d.direct.directnotify import DirectNotifyGlobal
import math
from operator import *
import ModelScreenShotGlobals
class ModelScreenShot(DirectObject):
notify = DirectNotifyGlobal.directNotify.newCategory("ModelScreenShot")
def __init__(self):
# Grab a list of models to capture screenshots of from an array in
# the globals file
self.modelsToView = ModelScreenShotGlobals.models
self.models = []
# Attach all the models listed to render and save a pointer to them
# in an array. Then hide the model.
for model in self.modelsToView:
m = loader.loadModel(model)
m.reparentTo(render)
self.models.append(m)
m.hide()
# Set a nice farplane far, far away
self.lens = base.camera.getChild(0).node().getLens()
self.lens.setFar(10000)
# Hide the cursor
self.props = WindowProperties()
self.props.setCursorHidden(0)
base.win.requestProperties(self.props)
# Method for getting the distance to an object from the camera
def getDist(obj, lens):
rad = obj.getBounds().getRadius()
fov = lens.getFov()
dist = rad / math.tan(deg2Rad(min(fov[0], fov[1]/2.0)))
return dist
# Determin the optimal camera position
def getOptCamPos(obj, dist):
cen = obj.getBounds().getCenter()
camPos = VBase3(cen.getX(), -dist, cen.getZ())
return camPos
# Generate screenshots
def generatePics():
for model in self.models:
model.show()
base.camera.setPos(getOptCamPos(model, getDist(model, self.lens)))
uFilename = model.getName().replace('.egg','.jpg')
self.notify.info("screenshot %s camera pos: %s" % (uFilename, base.camera.getPos()))
base.graphicsEngine.renderFrame()
base.screenshot(namePrefix = uFilename, defaultFilename = 0)
model.hide()
generatePics()
mss = ModelScreenShot()
run()
| [
"ralf.kaestner@gmail.com"
] | ralf.kaestner@gmail.com |
74dc0aa2f64c2ed5f40a01ad6b6f54b7cf178236 | 46563ccc5da11bb4b68bc2b27a40524af4d241b9 | /Dynamic_Routing_Between_Capsules/params.py | c8bc9eb8b09396b18606467f9d60a5aaed6ec2c2 | [
"MIT"
] | permissive | rahul-c1/Implementations-of-Deep-Learning-Models | e7e974b943782cb8b8afc5b6158ffee27f1a2248 | 4c1fe059b7c46c22790f716ca57d51bddc6248ac | refs/heads/master | 2020-03-24T09:58:46.521183 | 2018-01-20T05:22:55 | 2018-01-20T05:22:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 533 | py | conv1_params = {
'filters': 256,
'kernel_size': 9,
'strides': 1,
'padding': 'valid',
'activation': 'relu'
}
conv2_params = {
'filters': 256,
'kernel_size': 9,
'strides': 2,
'padding': 'valid',
'activation': 'relu',
}
batch_size = 256
input_shape = [28, 28, 1]
primary_capsules_shape = [1152, 8]
digits_capsules_params = {
'num_capsule': 10,
'dim_capsule': 16,
'routing_iterations': 3
}
dense1, dense2 = 512, 1024
margin_loss_lambda = 0.5
reconstruction_loss_coefficient = 0.0005 | [
"maxpanziyuan@gmail.com"
] | maxpanziyuan@gmail.com |
5791d354413791123e36b62c42afffb84cb87e3d | eb1eb25dd8c68192b37693c8e0224bf047da9fed | /django_app/utils/fields/__init__.py | f63012d6f3da5fe9ee0e253f78d4615048cfbe26 | [] | no_license | WeatherSound/WeatherSoundTest | bae5e52a88b180f6493d306eac326b52524013af | 8d341a5f9e5c571c441f1e402f7dae8938cabb02 | refs/heads/master | 2021-01-02T08:12:18.722213 | 2017-08-21T07:44:12 | 2017-08-21T07:44:12 | 98,959,749 | 1 | 1 | null | 2017-09-04T05:32:17 | 2017-08-01T04:36:05 | Python | UTF-8 | Python | false | false | 33 | py | from .custom_imagefields import * | [
"qufskan9396@gmail.com"
] | qufskan9396@gmail.com |
e4f81b86df0300aaaa88eb3081f241403e987142 | 183bb8e9998a3eeebdc6dd0a5bf77525ef005a1f | /ribbit/ribbit_app/migrations/0008_auto__del_field_bookpasser_content__del_field_bookpasser_location__add.py | 5124b0c9c3937fa7db6e75d1fb8c501b19ae4227 | [] | no_license | gzpgg3x/SEARSHackPresentable | dcc7d3187bc459af5e8c535af8644d5d0fba7b05 | 0a78b6555f6e126506fa4f684a6b1d93b106d69a | refs/heads/master | 2020-05-30T10:39:29.211767 | 2014-06-08T16:08:05 | 2014-06-08T16:08:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,167 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'bookPasser.content'
db.delete_column(u'ribbit_app_bookpasser', 'content')
# Deleting field 'bookPasser.location'
db.delete_column(u'ribbit_app_bookpasser', 'location')
# Adding field 'bookPasser.brand'
db.add_column(u'ribbit_app_bookpasser', 'brand',
self.gf('django.db.models.fields.CharField')(default='', max_length=40, blank=True),
keep_default=False)
# Adding field 'bookPasser.product'
db.add_column(u'ribbit_app_bookpasser', 'product',
self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True),
keep_default=False)
def backwards(self, orm):
# Adding field 'bookPasser.content'
db.add_column(u'ribbit_app_bookpasser', 'content',
self.gf('django.db.models.fields.CharField')(default='', max_length=40, blank=True),
keep_default=False)
# Adding field 'bookPasser.location'
db.add_column(u'ribbit_app_bookpasser', 'location',
self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True),
keep_default=False)
# Deleting field 'bookPasser.brand'
db.delete_column(u'ribbit_app_bookpasser', 'brand')
# Deleting field 'bookPasser.product'
db.delete_column(u'ribbit_app_bookpasser', 'product')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'ribbit_app.bookpasser': {
'Meta': {'object_name': 'bookPasser'},
'brand': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '18', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'product': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'ribbit_app.branch': {
'Meta': {'object_name': 'Branch'},
'branchaddress': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'branchname': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}),
'branchphone': ('django.db.models.fields.CharField', [], {'max_length': '14', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'ribbit_app.shout': {
'Meta': {'object_name': 'Shout'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'book': ('django.db.models.fields.CharField', [], {'max_length': '60', 'blank': 'True'}),
'branchname': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'count': ('django.db.models.fields.CharField', [], {'max_length': '5', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lat': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '7'}),
'lng': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '7'}),
'message': ('django.db.models.fields.TextField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'zip': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'})
},
u'ribbit_app.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'follows': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'followed_by'", 'symmetrical': 'False', 'to': u"orm['ribbit_app.UserProfile']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['ribbit_app'] | [
"gzpgg3x@yahoo.com"
] | gzpgg3x@yahoo.com |
37534e0ae850d4fbd06ebdf37fae551e63e9146a | 44ba7f2c3e396ab2c58ce42763da5c18f5d0db4b | /ethicml/evaluators/evaluate_models.py | ed7b22f01b736f5b26cc799f4a555cd9f848e85d | [] | no_license | anonymous-iclr-3518/code-for-submission | 99e45110d2377c08433b619afb9c14cf645be5b0 | 3aecb7642d9611ae0a61cd47948931f8f47b6f76 | refs/heads/main | 2023-01-13T18:27:03.728542 | 2020-11-25T15:21:49 | 2020-11-25T15:21:49 | 315,338,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,770 | py | """Runs given metrics on given algorithms for given datasets."""
from collections import OrderedDict
from pathlib import Path
from typing import Dict, List, NamedTuple, Optional, Sequence, Union
import pandas as pd
from tqdm import tqdm
from ethicml.algorithms.inprocess.in_algorithm import InAlgorithm
from ethicml.algorithms.postprocess.post_algorithm import PostAlgorithm
from ethicml.algorithms.preprocess.pre_algorithm import PreAlgorithm
from ethicml.data.dataset import Dataset
from ethicml.data.load import load_data
from ethicml.metrics.metric import Metric
from ethicml.preprocessing import DataSplitter, RandomSplit
from ethicml.utility import (
DataTuple,
Prediction,
Results,
ResultsAggregator,
TestTuple,
TrainTestPair,
make_results,
)
from .parallelism import run_in_parallel
from .per_sensitive_attribute import (
MetricNotApplicable,
diff_per_sensitive_attribute,
metric_per_sensitive_attribute,
ratio_per_sensitive_attribute,
)
__all__ = ["evaluate_models", "run_metrics", "load_results", "evaluate_models_async"]
def get_sensitive_combinations(metrics: List[Metric], train: DataTuple) -> List[str]:
"""Get all possible combinations of sensitive attribute and metrics."""
poss_values: List[str] = []
for col in train.s.columns:
uniques = train.s[col].unique()
for unique in uniques:
poss_values.append(f"{col}_{unique}")
return [f"{s}_{m.name}" for s in poss_values for m in metrics]
def per_sens_metrics_check(per_sens_metrics: Sequence[Metric]) -> None:
"""Check if the given metrics allow application per sensitive attribute."""
for metric in per_sens_metrics:
if not metric.apply_per_sensitive:
raise MetricNotApplicable(
f"Metric {metric.name} is not applicable per sensitive "
f"attribute, apply to whole dataset instead"
)
def run_metrics(
predictions: Prediction,
actual: DataTuple,
metrics: Sequence[Metric] = (),
per_sens_metrics: Sequence[Metric] = (),
diffs_and_ratios: bool = True,
) -> Dict[str, float]:
"""Run all the given metrics on the given predictions and return the results.
Args:
predictions: DataFrame with predictions
actual: DataTuple with the labels
metrics: list of metrics
per_sens_metrics: list of metrics that are computed per sensitive attribute
diffs_and_ratios: if True, compute diffs and ratios per sensitive attribute
"""
result: Dict[str, float] = {}
if predictions.hard.isna().any(axis=None):
return {"algorithm_failed": 1.0}
for metric in metrics:
result[metric.name] = metric.score(predictions, actual)
for metric in per_sens_metrics:
per_sens = metric_per_sensitive_attribute(predictions, actual, metric)
if diffs_and_ratios:
diff_per_sens = diff_per_sensitive_attribute(per_sens)
ratio_per_sens = ratio_per_sensitive_attribute(per_sens)
per_sens.update(diff_per_sens)
per_sens.update(ratio_per_sens)
for key, value in per_sens.items():
result[f"{metric.name}_{key}"] = value
for key, value in predictions.info.items():
result[key] = value
return result # SUGGESTION: we could return a DataFrame here instead of a dictionary
def load_results(
dataset_name: str,
transform_name: str,
topic: Optional[str] = None,
outdir: Path = Path(".") / "results",
) -> Optional[Results]:
"""Load results from a CSV file that was created by `evaluate_models`.
Args:
dataset_name: name of the dataset of the results
transform_name: name of the transformation that was used for the results
topic: (optional) topic string of the results
outdir: directory where the results are stored
Returns:
DataFrame if the file exists; None otherwise
"""
csv_file = _result_path(outdir, dataset_name, transform_name, topic)
if csv_file.is_file():
return make_results(csv_file)
return None
def _result_path(
outdir: Path, dataset_name: str, transform_name: str, topic: Optional[str]
) -> Path:
base_name: str = "" if topic is None else f"{topic}_"
return outdir / f"{base_name}{dataset_name}_{transform_name}.csv"
def _delete_previous_results(
outdir: Path, datasets: List[Dataset], transforms: Sequence[PreAlgorithm], topic: Optional[str]
) -> None:
for dataset in datasets:
transform_list = ["no_transform"]
for preprocess_model in transforms:
transform_list.append(preprocess_model.name)
for transform_name in transform_list:
path_to_file: Path = _result_path(outdir, dataset.name, transform_name, topic)
if path_to_file.exists():
path_to_file.unlink()
def evaluate_models(
datasets: List[Dataset],
preprocess_models: Sequence[PreAlgorithm] = (),
inprocess_models: Sequence[InAlgorithm] = (),
postprocess_models: Sequence[PostAlgorithm] = (),
metrics: Sequence[Metric] = (),
per_sens_metrics: Sequence[Metric] = (),
repeats: int = 1,
test_mode: bool = False,
delete_prev: bool = False,
splitter: Optional[DataSplitter] = None,
topic: Optional[str] = None,
fair_pipeline: bool = True,
) -> Results:
"""Evaluate all the given models for all the given datasets and compute all the given metrics.
Args:
datasets: list of dataset objects
preprocess_models: list of preprocess model objects
inprocess_models: list of inprocess model objects
postprocess_models: list of postprocess model objects
metrics: list of metric objects
per_sens_metrics: list of metric objects that will be evaluated per sensitive attribute
repeats: number of repeats to perform for the experiments
test_mode: if True, only use a small subset of the data so that the models run faster
delete_prev: False by default. If True, delete saved results in directory
splitter: (optional) custom train-test splitter
topic: (optional) a string that identifies the run; the string is prepended to the filename
fair_pipeline: if True, run fair inprocess algorithms on the output of preprocessing
"""
# pylint: disable=too-many-arguments
per_sens_metrics_check(per_sens_metrics)
train_test_split: DataSplitter
if splitter is None:
train_test_split = RandomSplit(train_percentage=0.8, start_seed=0)
else:
train_test_split = splitter
columns = ["dataset", "transform", "model", "split_id"]
total_experiments = (
len(datasets)
* repeats
* (len(preprocess_models) + ((1 + len(preprocess_models)) * len(inprocess_models)))
)
outdir = Path(".") / "results"
outdir.mkdir(exist_ok=True)
if delete_prev:
_delete_previous_results(outdir, datasets, preprocess_models, topic)
pbar = tqdm(total=total_experiments, smoothing=0)
for dataset in datasets:
# ================================== begin: one repeat ====================================
for split_id in range(repeats):
train: DataTuple
test: DataTuple
train, test, split_info = train_test_split(load_data(dataset), split_id=split_id)
if test_mode:
# take smaller subset of training data to speed up training
train = train.get_subset()
to_operate_on: Dict[str, TrainTestPair] = {
"no_transform": TrainTestPair(train=train, test=test)
}
# ========================== begin: run preprocessing models ==========================
for pre_process_method in preprocess_models:
logging: "OrderedDict[str, str]" = OrderedDict()
logging["model"] = pre_process_method.name
logging["dataset"] = dataset.name
logging["repeat"] = str(split_id)
pbar.set_postfix(ordered_dict=logging)
new_train, new_test = pre_process_method.run(train, test)
to_operate_on[pre_process_method.name] = TrainTestPair(
train=new_train, test=new_test
)
pbar.update()
# =========================== end: run preprocessing models ===========================
# ========================= begin: loop over preprocessed data ========================
for transform_name, transform in to_operate_on.items():
transformed_train: DataTuple = transform.train
transformed_test: Union[DataTuple, TestTuple] = transform.test
results_df = pd.DataFrame(columns=columns)
# ========================== begin: run inprocess models ==========================
for model in inprocess_models:
if (
not fair_pipeline
and transform_name != "no_transform"
and model.is_fairness_algo
):
pbar.update()
continue
logging = OrderedDict()
logging["model"] = model.name
logging["dataset"] = dataset.name
logging["transform"] = transform_name
logging["repeat"] = str(split_id)
pbar.set_postfix(ordered_dict=logging)
temp_res: Dict[str, Union[str, float]] = {
"dataset": dataset.name,
"transform": transform_name,
"model": model.name,
"split_id": split_id,
**split_info,
}
predictions: Prediction = model.run(transformed_train, transformed_test)
temp_res.update(run_metrics(predictions, test, metrics, per_sens_metrics))
for postprocess in postprocess_models:
# Post-processing has yet to be defined
# - leaving blank until we have an implementation to work with
pass
results_df = results_df.append(temp_res, ignore_index=True, sort=False)
pbar.update()
# =========================== end: run inprocess models ===========================
csv_file = _result_path(outdir, dataset.name, transform_name, topic)
aggregator = ResultsAggregator(results_df)
# put old results before new results -> prepend=True
aggregator.append_from_csv(csv_file, prepend=True)
aggregator.save_as_csv(csv_file)
# ========================== end: loop over preprocessed data =========================
# =================================== end: one repeat =====================================
pbar.close() # very important! when we're not using "with", we have to close tqdm manually
preprocess_names = [model.name for model in preprocess_models]
aggregator = ResultsAggregator() # create empty aggregator object
for dataset in datasets:
for transform_name in ["no_transform"] + preprocess_names:
csv_file = _result_path(outdir, dataset.name, transform_name, topic)
aggregator.append_from_csv(csv_file)
return aggregator.results
class _DataInfo(NamedTuple):
test: DataTuple
dataset_name: str
transform_name: str
split_info: Dict[str, float]
async def evaluate_models_async(
datasets: List[Dataset],
preprocess_models: Sequence[PreAlgorithm] = (),
inprocess_models: Sequence[InAlgorithm] = (),
postprocess_models: Sequence[PostAlgorithm] = (),
metrics: Sequence[Metric] = (),
per_sens_metrics: Sequence[Metric] = (),
repeats: int = 1,
test_mode: bool = False,
delete_prev: bool = False,
splitter: Optional[DataSplitter] = None,
topic: Optional[str] = None,
fair_pipeline: bool = True,
max_parallel: int = 1,
) -> Results:
"""Evaluate all the given models for all the given datasets and compute all the given metrics.
Args:
datasets: list of dataset objects
preprocess_models: list of preprocess model objects
inprocess_models: list of inprocess model objects
postprocess_models: list of postprocess model objects
metrics: list of metric objects
per_sens_metrics: list of metric objects that will be evaluated per sensitive attribute
repeats: number of repeats to perform for the experiments
test_mode: if True, only use a small subset of the data so that the models run faster
delete_prev: False by default. If True, delete saved results in directory
splitter: (optional) custom train-test splitter
topic: (optional) a string that identifies the run; the string is prepended to the filename
fair_pipeline: if True, run fair inprocess algorithms on the output of preprocessing
max_parallel: max number of threads ot run in parallel (default: 1)
"""
# pylint: disable=too-many-arguments
del postprocess_models # not used at the moment
per_sens_metrics_check(per_sens_metrics)
if splitter is None:
train_test_split: DataSplitter = RandomSplit(train_percentage=0.8, start_seed=0)
else:
train_test_split = splitter
default_transform_name = "no_transform"
outdir = Path(".") / "results" # OS-independent way of saying './results'
outdir.mkdir(exist_ok=True)
if delete_prev:
_delete_previous_results(outdir, datasets, preprocess_models, topic)
all_results = ResultsAggregator()
# ======================================= prepare data ========================================
data_splits: List[TrainTestPair] = []
test_data: List[_DataInfo] = [] # contains the test set and other things needed for the metrics
for dataset in datasets:
for split_id in range(repeats):
train: DataTuple
test: DataTuple
train, test, split_info = train_test_split(load_data(dataset), split_id=split_id)
if test_mode:
# take smaller subset of training data to speed up training
train = train.get_subset()
train = train.replace(name=f"{train.name} ({split_id})")
data_splits.append(TrainTestPair(train, test))
split_info.update({"split_id": split_id})
test_data.append(_DataInfo(test, dataset.name, default_transform_name, split_info))
# ============================= inprocess models on untransformed =============================
all_predictions = await run_in_parallel(inprocess_models, data_splits, max_parallel)
inprocess_untransformed = _gather_metrics(
all_predictions, test_data, inprocess_models, metrics, per_sens_metrics, outdir, topic
)
all_results.append_df(inprocess_untransformed)
# ===================================== preprocess models =====================================
# run all preprocess models
all_transformed = await run_in_parallel(preprocess_models, data_splits, max_parallel)
# append the transformed data to `transformed_data`
transformed_data: List[TrainTestPair] = []
transformed_test: List[_DataInfo] = []
for transformed, pre_model in zip(all_transformed, preprocess_models):
for (transf_train, transf_test), data_info in zip(transformed, test_data):
transformed_data.append(TrainTestPair(transf_train, transf_test))
transformed_test.append(
_DataInfo(
data_info.test, data_info.dataset_name, pre_model.name, data_info.split_info
)
)
# ============================= inprocess models on transformed ===============================
if fair_pipeline:
run_on_transformed = inprocess_models
else:
# if not fair pipeline, run only the non-fair models on the transformed data
run_on_transformed = [model for model in inprocess_models if not model.is_fairness_algo]
transf_preds = await run_in_parallel(run_on_transformed, transformed_data, max_parallel)
transf_results = _gather_metrics(
transf_preds, transformed_test, run_on_transformed, metrics, per_sens_metrics, outdir, topic
)
all_results.append_df(transf_results)
# ======================================== return all =========================================
return all_results.results
def _gather_metrics(
    all_predictions: List[List[Prediction]],
    test_data: Sequence[_DataInfo],
    inprocess_models: Sequence[InAlgorithm],
    metrics: Sequence[Metric],
    per_sens_metrics: Sequence[Metric],
    outdir: Path,
    topic: Optional[str],
) -> Results:
    """Take a list of lists of predictions and compute all metrics.

    Args:
        all_predictions: one inner list of predictions per model; the outer list
            is indexed by model, the inner list by dataset/split.
        test_data: information about the test set of each dataset/split.
        inprocess_models: the models that produced ``all_predictions``.
        metrics: metrics computed on the whole test set.
        per_sens_metrics: metrics computed per sensitive-attribute group.
        outdir: directory where the per-dataset CSV files are written.
        topic: optional topic used when constructing the CSV file path.

    Returns:
        Dataframe with one row per (dataset, transform, model, split).
    """
    columns = ["dataset", "transform", "model", "split_id"]
    # transpose `all_predictions` so that the order in the results dataframe is correct
    num_cols = len(all_predictions[0]) if all_predictions else 0
    all_predictions_t = [[row[i] for row in all_predictions] for i in range(num_cols)]
    all_results = ResultsAggregator()
    # compute metrics, collect them and write them to files
    for preds_for_dataset, data_info in zip(all_predictions_t, test_data):
        # ============================= handle results of one dataset =============================
        # Collect one row dict per model and build the dataframe in a single
        # step afterwards. (`DataFrame.append` was deprecated in pandas 1.4
        # and removed in pandas 2.0, and appending row-by-row is quadratic.)
        rows: List[Dict[str, Union[str, float]]] = []
        predictions: Prediction
        for predictions, model in zip(preds_for_dataset, inprocess_models):
            # construct a row of the results dataframe
            df_row: Dict[str, Union[str, float]] = {
                "dataset": data_info.dataset_name,
                "transform": data_info.transform_name,
                "model": model.name,
                **data_info.split_info,
            }
            df_row.update(run_metrics(predictions, data_info.test, metrics, per_sens_metrics))
            rows.append(df_row)
        results_df = pd.DataFrame(rows)
        # keep the fixed identifier columns first, metric columns after them,
        # matching the column order the old empty-frame+append code produced
        ordered = columns + [col for col in results_df.columns if col not in columns]
        results_df = results_df.reindex(columns=ordered)
        # write results to CSV files and load previous results from the files if they already exist
        csv_file = _result_path(outdir, data_info.dataset_name, data_info.transform_name, topic)
        aggregator = ResultsAggregator(results_df)
        # put old results before new results -> prepend=True
        aggregator.append_from_csv(csv_file, prepend=True)
        aggregator.save_as_csv(csv_file)
        all_results.append_df(aggregator.results)
    return all_results.results
| [
"anon@ymo.us"
] | anon@ymo.us |
0465cbce511a108a717722c70a4f8a2073edcec1 | 988fc095bc5487fdbb2b293106e012efde4a35d8 | /tests/functional_tests.py | 4335907934dfd8743b63da981ba201b36988ebb1 | [
"MIT"
] | permissive | Nagasaki45/pathtag | d614ea77ab551be8bae22ce21ae5e65603f4a038 | dfd66186959715f71f6ecd583521f0cd03f2f17e | refs/heads/master | 2020-12-20T22:53:37.714411 | 2018-09-11T23:06:49 | 2018-09-11T23:06:49 | 28,537,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,791 | py | import unittest
import shutil
import os
import tarfile
import subprocess
from mutagen.easyid3 import EasyID3
MATERIALS_DIR = 'tests/materials'
BACKUP = 'backup.tar'
class MainTest(unittest.TestCase):
    """End-to-end test: run pathtag.py over MATERIALS_DIR and check ID3 tags.

    setUp archives the materials directory and runs the script; tearDown
    restores the directory from the archive so every test starts clean.
    """

    def setUp(self):
        # Backup MATERIALS_DIR
        with tarfile.TarFile(BACKUP, 'w') as backup:
            backup.add(MATERIALS_DIR)
        # Run pathtag.py on it
        subprocess.check_call(['python', 'pathtag.py', MATERIALS_DIR])

    def tearDown(self):
        # Remove manipulated dir
        shutil.rmtree(MATERIALS_DIR)
        # Restore the backup
        with tarfile.TarFile(BACKUP) as backup:
            backup.extractall()
        # Remove backup
        os.remove(BACKUP)

    def load_track(self, *args):
        # Join the path components under MATERIALS_DIR and load the ID3 tags.
        args = [MATERIALS_DIR] + list(args)
        return EasyID3(os.path.join(*args))

    def test_standard_behavior(self):
        # artist/album directory names should end up in the corresponding tags
        track = self.load_track('artist', 'album', 'track.mp3')
        self.assertEqual(track['artist'], ['artist'])
        self.assertEqual(track['album'], ['album'])

    def test_unknown_album(self):
        # a track directly under the artist directory gets album 'Unknown'
        track = self.load_track('artist', 'unknown_album_track.mp3')
        self.assertEqual(track['album'], ['Unknown'])

    def test_illegal_path_no_dir(self):
        # a track at the top level keeps its original tags
        track = self.load_track('illegal_path_track.mp3')
        self.assertEqual(track['album'], ['asdasd'])  # Original value
        self.assertEqual(track['artist'], ['asdasd'])  # Original value

    def test_illegal_path_too_nested(self):
        # a track nested deeper than artist/album keeps its original tags
        track = self.load_track(
            'artist', 'album', 'illegal_path_dir', 'illegal_path_track.mp3'
        )
        self.assertEqual(track['album'], ['asdasd'])  # Original value
        self.assertEqual(track['artist'], ['asdasd'])  # Original value
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main()
| [
"nagasaki45@gmail.com"
] | nagasaki45@gmail.com |
e055d3f40ff6a4d1d3f8f95db9dc115b493d590d | 90419da201cd4948a27d3612f0b482c68026c96f | /sdk/python/pulumi_azure_nextgen/logic/v20190501/list_integration_account_map_content_callback_url.py | 622e2572a913a2f8a5e4fcf62ea045a1208cb715 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | test-wiz-sec/pulumi-azure-nextgen | cd4bee5d70cb0d332c04f16bb54e17d016d2adaf | 20a695af0d020b34b0f1c336e1b69702755174cc | refs/heads/master | 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 | Apache-2.0 | 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null | UTF-8 | Python | false | false | 5,729 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'ListIntegrationAccountMapContentCallbackUrlResult',
'AwaitableListIntegrationAccountMapContentCallbackUrlResult',
'list_integration_account_map_content_callback_url',
]
@pulumi.output_type
class ListIntegrationAccountMapContentCallbackUrlResult:
    """
    The workflow trigger callback URL.
    """
    def __init__(__self__, base_path=None, method=None, queries=None, relative_path=None, relative_path_parameters=None, value=None):
        # Each field is type-checked at runtime and stored on the output
        # object via `pulumi.set` (this file is Pulumi SDK-generator output).
        if base_path and not isinstance(base_path, str):
            raise TypeError("Expected argument 'base_path' to be a str")
        pulumi.set(__self__, "base_path", base_path)
        if method and not isinstance(method, str):
            raise TypeError("Expected argument 'method' to be a str")
        pulumi.set(__self__, "method", method)
        if queries and not isinstance(queries, dict):
            raise TypeError("Expected argument 'queries' to be a dict")
        pulumi.set(__self__, "queries", queries)
        if relative_path and not isinstance(relative_path, str):
            raise TypeError("Expected argument 'relative_path' to be a str")
        pulumi.set(__self__, "relative_path", relative_path)
        if relative_path_parameters and not isinstance(relative_path_parameters, list):
            raise TypeError("Expected argument 'relative_path_parameters' to be a list")
        pulumi.set(__self__, "relative_path_parameters", relative_path_parameters)
        if value and not isinstance(value, str):
            raise TypeError("Expected argument 'value' to be a str")
        pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter(name="basePath")
    def base_path(self) -> str:
        """
        Gets the workflow trigger callback URL base path.
        """
        return pulumi.get(self, "base_path")

    @property
    @pulumi.getter
    def method(self) -> str:
        """
        Gets the workflow trigger callback URL HTTP method.
        """
        return pulumi.get(self, "method")

    @property
    @pulumi.getter
    def queries(self) -> Optional['outputs.WorkflowTriggerListCallbackUrlQueriesResponseResult']:
        """
        Gets the workflow trigger callback URL query parameters.
        """
        return pulumi.get(self, "queries")

    @property
    @pulumi.getter(name="relativePath")
    def relative_path(self) -> str:
        """
        Gets the workflow trigger callback URL relative path.
        """
        return pulumi.get(self, "relative_path")

    @property
    @pulumi.getter(name="relativePathParameters")
    def relative_path_parameters(self) -> Optional[Sequence[str]]:
        """
        Gets the workflow trigger callback URL relative path parameters.
        """
        return pulumi.get(self, "relative_path_parameters")

    @property
    @pulumi.getter
    def value(self) -> str:
        """
        Gets the workflow trigger callback URL.
        """
        return pulumi.get(self, "value")
class AwaitableListIntegrationAccountMapContentCallbackUrlResult(ListIntegrationAccountMapContentCallbackUrlResult):
    # Lets callers `await` an already-resolved result: __await__ yields
    # nothing and immediately returns the plain result object.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return ListIntegrationAccountMapContentCallbackUrlResult(
            base_path=self.base_path,
            method=self.method,
            queries=self.queries,
            relative_path=self.relative_path,
            relative_path_parameters=self.relative_path_parameters,
            value=self.value)
def list_integration_account_map_content_callback_url(integration_account_name: Optional[str] = None,
                                                      key_type: Optional[str] = None,
                                                      map_name: Optional[str] = None,
                                                      not_after: Optional[str] = None,
                                                      resource_group_name: Optional[str] = None,
                                                      opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListIntegrationAccountMapContentCallbackUrlResult:
    """
    Get the content callback URL of an integration account map (workflow
    trigger callback URL).

    :param str integration_account_name: The integration account name.
    :param str key_type: The key type.
    :param str map_name: The integration account map name.
    :param str not_after: The expiry time.
    :param str resource_group_name: The resource group name.
    """
    __args__ = dict()
    __args__['integrationAccountName'] = integration_account_name
    __args__['keyType'] = key_type
    __args__['mapName'] = map_name
    __args__['notAfter'] = not_after
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Invoke the Azure data-source endpoint synchronously and unpack the result.
    __ret__ = pulumi.runtime.invoke('azure-nextgen:logic/v20190501:listIntegrationAccountMapContentCallbackUrl', __args__, opts=opts, typ=ListIntegrationAccountMapContentCallbackUrlResult).value

    return AwaitableListIntegrationAccountMapContentCallbackUrlResult(
        base_path=__ret__.base_path,
        method=__ret__.method,
        queries=__ret__.queries,
        relative_path=__ret__.relative_path,
        relative_path_parameters=__ret__.relative_path_parameters,
        value=__ret__.value)
| [
"public@paulstack.co.uk"
] | public@paulstack.co.uk |
0124697bac9f6283a8e32edd133b7c0657ef6f02 | 1eb0213140ada1c48edc5fb97b439d6556e6c3a9 | /0x0A-python-inheritance/7-base_geometry.py | 06615bc37e1f43fbd3545438200a215ece54b58c | [] | no_license | HeimerR/holbertonschool-higher_level_programming | 53d2a3c536fd9976bb7fea76dd2ecf9a6ba3297e | 892c0f314611c0a30765cf673e8413dbee567a2d | refs/heads/master | 2020-05-18T02:24:11.829328 | 2020-04-30T03:59:04 | 2020-04-30T03:59:04 | 184,112,468 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 470 | py | #!/usr/bin/python3
""" Module base geometry
"""
class BaseGeometry:
    """Base class for geometry objects; subclasses must provide area()."""

    def area(self):
        """Always raise: area() is left for subclasses to implement."""
        raise Exception('area() is not implemented')

    def integer_validator(self, name, value):
        """Validate that value is a strictly positive int.

        Uses an exact type check, so bool (a subclass of int) is rejected.
        Raises TypeError for non-int values, ValueError for values <= 0.
        """
        if not type(value) is int:
            raise TypeError('{} must be an integer'.format(name))
        if not value > 0:
            raise ValueError('{} must be greater than 0'.format(name))
| [
"ing.heimer.rojas@gmail.com"
] | ing.heimer.rojas@gmail.com |
62d02bad6ba62f87039310864a61db1b7807d6bb | d434f2ceb34b3eaad7d62fb71f01be16cdebd0d0 | /Stock scraping/marketwatch/middlewares.py | f052e93af23329ccfbac91415e77bfe8f744a9c3 | [] | no_license | webclinic017/Stock-scraping | 2f6c60ccc3114cff5f6bd60a267217f4809368a5 | d50800c30562429919882cf81f305ee6716392f7 | refs/heads/master | 2021-12-23T07:16:16.855059 | 2017-11-07T13:14:27 | 2017-11-07T13:14:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,909 | py | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class MarketwatchSpiderMiddleware(object):
    """Spider middleware hooks; every method here is optional to Scrapy."""
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        # Also subscribes spider_opened() to the spider_opened signal.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.

        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.

        # Must return an iterable of Request, dict or Item objects.
        # (Pass-through: yields every result unchanged.)
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.

        # Should return either None or an iterable of Response, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.

        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
| [
"noreply@github.com"
] | webclinic017.noreply@github.com |
972e5e970aac49dbfe445b1ed561fc185a32d9b6 | e2992e19ebc728387125a70c72a702a076de7a12 | /Python/01_My_Programs_Hv/02_String/23_More_About_Variable.py | 9a16bce2ae933aba8672f9637575b868e1abc684 | [] | no_license | harsh1915/Machine_Learning | c9c32ed07df3b2648f7796f004ebb38726f13ae4 | c68a973cfbc6c60eeb94e253c6f2ce34baa3686e | refs/heads/main | 2023-08-27T15:01:16.430869 | 2021-11-15T07:53:36 | 2021-11-15T07:53:36 | 377,694,941 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 102 | py | name, age = "HarsH", 22
print( "Hello "+ name+ " Your Age is "+ str( age))
a= b= c= 2
print( a+ b+ c) | [
"“hdjethva6@gmail.com”"
] | “hdjethva6@gmail.com” |
8a5dca801b4ec186f2b749ce1e27347e1b1e1750 | 09cead98874a64d55b9e5c84b369d3523c890442 | /py200912b_python2m6/day11_201121/sample/file_3_open.py | d6381dd6d88b7b7cebd514435788b08f725a6bd2 | [] | no_license | edu-athensoft/stem1401python_student | f12b404d749286036a090e941c0268381ce558f8 | baad017d4cef2994855b008a756758d7b5e119ec | refs/heads/master | 2021-08-29T15:01:45.875136 | 2021-08-24T23:03:51 | 2021-08-24T23:03:51 | 210,029,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | """
python file I/O
Opening Files
open()
first look
"""
# case 3. open file in specified full path
# different way to represent path in windows system
print("[info] open file in specified full path")
print("[info] opening file_open.txt ...")
f = open("D:/workspace/pycharm201803/ceit4101python/module_8_fileio/file_open.txt")
print("[info] closing ...")
f.close()
print("[info] done.")
| [
"lada314@gmail.com"
] | lada314@gmail.com |
7cbe4efbda319a44a4a563e56cc6bc8cae7c5f04 | c7967ec500b210513aa0b1f540144c931ca687ac | /알고리즘 스터디/개인공부/TwoPointer/PermutationSummation.py | 576694b4fd00b0f800b32c15f1f8c4361e775e12 | [] | no_license | sunminky/algorythmStudy | 9a88e02c444b10904cebae94170eba456320f8e8 | 2ee1b5cf1f2e5f7ef87b44643210f407c4aa90e2 | refs/heads/master | 2023-08-17T01:49:43.528021 | 2023-08-13T08:11:37 | 2023-08-13T08:11:37 | 225,085,243 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,349 | py | # https://www.acmicpc.net/problem/2143
import sys
# 누적합 구하기
def accumulation(number):
result = dict()
for i in range(len(number)):
total = 0
for j in range(i, len(number)):
total += number[j]
result[total] = result.get(total, 0) + 1 # 이 부분합이 나올 수 있는 경우의 수를 구함
return result
if __name__ == '__main__':
    target = int(sys.stdin.readline())  # target sum T
    sys.stdin.readline()  # skip the length line of array 1
    arr1 = [*map(int, sys.stdin.readline().split())]  # array 1
    acc1 = accumulation(arr1)  # occurrence count of each subarray sum of array 1
    sys.stdin.readline()  # skip the length line of array 2
    arr2 = [*map(int, sys.stdin.readline().split())]  # array 2
    acc2 = accumulation(arr2)  # occurrence count of each subarray sum of array 2
    acc1_key = sorted(acc1.keys())  # subarray sums of array 1, ascending
    acc2_key = sorted(acc2.keys())  # subarray sums of array 2, ascending
    answer = 0
    ## two pointers: smallest sums of arr1 vs largest sums of arr2 ##
    a1_idx = 0
    a2_idx = len(acc2_key) - 1
    while a1_idx < len(acc1_key) and a2_idx >= 0:
        calc = acc1_key[a1_idx] + acc2_key[a2_idx]  # sum of the two subarray sums
        # exact hit: multiply the multiplicities of both sums
        if calc == target:
            answer += acc1[acc1_key[a1_idx]] * acc2[acc2_key[a2_idx]]
        if calc <= target:
            a1_idx += 1
        else:
            a2_idx -= 1
    print(answer)
"suns1502@gmail.com"
] | suns1502@gmail.com |
a36d671af009a8c76753ff5416319589a3318f3c | 1f08436bab6cd03bcfb257e8e49405cbc265195a | /3_list/Sample/list_ex20.py | a063117240f1387a45dd6d1559b3fcf38182856c | [] | no_license | kuchunbk/PythonBasic | e3ba6322f256d577e37deff09c814c3a374b93b2 | a87135d7a98be8830d30acd750d84bcbf777280b | refs/heads/master | 2020-03-10T04:28:42.947308 | 2018-04-17T04:25:51 | 2018-04-17T04:25:51 | 129,192,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 685 | py | '''Question:
Write a Python program access the index of a list.
'''
# Python code:
nums = [5, 15, 35, 8, 98]
# enumerate yields (index, value) pairs for each list element
for position, element in enumerate(nums):
    print(position, element)
'''Output sample:
0 5
1 15
2 35
3 8
4 98
''' | [
"kuchunbk@gmail.com"
] | kuchunbk@gmail.com |
c8feaa8ecfa5607b14bf76c8344255b16073b91b | 51ce07a419abe50f49e7bb6a6c036af291ea2ef5 | /3.Algorithm/04. Stack1/DFS.py | d2435fd628dfe83323c14e92d7e2adee161ae3b2 | [] | no_license | salee1023/TIL | c902869e1359246b6dd926166f5ac9209af7b1aa | 2905bd331e451673cbbe87a19e658510b4fd47da | refs/heads/master | 2023-03-10T09:48:41.377704 | 2021-02-24T10:47:27 | 2021-02-24T10:47:27 | 341,129,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,491 | py | # 재귀
# Recursive depth-first search over the module-level adjacency matrix `G`,
# printing each vertex as it is first visited.
def dfs(v):
    # mark v as visited (uses module-level `visited`, `G`, `V`)
    visited[v] = 1
    print(v, end=' ')
    # recurse into each unvisited neighbour of v, in increasing index order
    for w in range(1, V+1):
        if G[v][w] == 1 and visited[w] == 0:
            dfs(w)
# --------------------------------------------
V, E = map(int, input().split())  # number of vertices, number of edges
temp = list(map(int, input().split()))  # flat list of edge endpoints
G = [[0]*(V+1) for _ in range(V+1)]  # adjacency matrix (1-indexed)
visited = [0]*(V+1)  # visit flags
# store each edge in the adjacency matrix, both directions (undirected graph)
for i in range(E):
    s, e = temp[2*i], temp[2*i+1]
    G[s][e] = 1
    G[e][s] = 1
dfs(1)
# 반복
'''
def dfs2(s,V):
# 초기화, 스택 생성, visitied[] 생성 및 초기화
visited = [0]*(V+1)
stack = []
stack.append(s) # 시작 노드 push()
visited[s] = 1
while stack: # 스택이 비어있지 않으면 반복
n = stack.pop() # 탐색할 노드 선택
for i in range(1,V+1):
if adj[n][i] == 1 and visited[i] == 0: # n에 인접한 노드가 있고, 방문안한 노드일 때,
stack.append(i)
visited[i] = 1
# --------------------------------------------------
V, E = map(int, input().split()) # V 정점 개수, E 간선 개수
adj = [[0]*(V+1) for _ in range(V+1)]
tmp = list(map(int, input().split()))
for i in range(E):
n1, n2 = tmp[i*2], tmp[i*2+1]
adj[n1][n2] = 1
adj[n2][n1] = 1 # 무방향 그래프인 경우
dfs(1, V)
'''
| [
"dltmddk1023@gmail.com"
] | dltmddk1023@gmail.com |
0a364178e1a3a1ca5c09b5d161d750af22a4a947 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/network/v20180101/get_virtual_network_gateway_advertised_routes.py | b50d1b09c509d3f5dc2c559d307478ea00d98982 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,591 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetVirtualNetworkGatewayAdvertisedRoutesResult',
'AwaitableGetVirtualNetworkGatewayAdvertisedRoutesResult',
'get_virtual_network_gateway_advertised_routes',
'get_virtual_network_gateway_advertised_routes_output',
]
@pulumi.output_type
class GetVirtualNetworkGatewayAdvertisedRoutesResult:
    """
    List of virtual network gateway routes
    """
    def __init__(__self__, value=None):
        # Type-check the single field before storing it on the output object
        # (this file is Pulumi SDK-generator output).
        if value and not isinstance(value, list):
            raise TypeError("Expected argument 'value' to be a list")
        pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def value(self) -> Optional[Sequence['outputs.GatewayRouteResponse']]:
        """
        List of gateway routes
        """
        return pulumi.get(self, "value")
class AwaitableGetVirtualNetworkGatewayAdvertisedRoutesResult(GetVirtualNetworkGatewayAdvertisedRoutesResult):
    # Lets callers `await` an already-resolved result: __await__ yields
    # nothing and immediately returns the plain result object.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetVirtualNetworkGatewayAdvertisedRoutesResult(
            value=self.value)
def get_virtual_network_gateway_advertised_routes(peer: Optional[str] = None,
                                                  resource_group_name: Optional[str] = None,
                                                  virtual_network_gateway_name: Optional[str] = None,
                                                  opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVirtualNetworkGatewayAdvertisedRoutesResult:
    """
    List of virtual network gateway routes

    :param str peer: The IP address of the peer
    :param str resource_group_name: The name of the resource group.
    :param str virtual_network_gateway_name: The name of the virtual network gateway.
    """
    __args__ = dict()
    __args__['peer'] = peer
    __args__['resourceGroupName'] = resource_group_name
    __args__['virtualNetworkGatewayName'] = virtual_network_gateway_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Invoke the Azure data-source endpoint synchronously and unpack the result.
    __ret__ = pulumi.runtime.invoke('azure-native:network/v20180101:getVirtualNetworkGatewayAdvertisedRoutes', __args__, opts=opts, typ=GetVirtualNetworkGatewayAdvertisedRoutesResult).value

    return AwaitableGetVirtualNetworkGatewayAdvertisedRoutesResult(
        value=__ret__.value)
# Output-typed variant: `lift_output_func` wraps the plain function so that
# its arguments may be `pulumi.Output` values; the body is intentionally `...`.
@_utilities.lift_output_func(get_virtual_network_gateway_advertised_routes)
def get_virtual_network_gateway_advertised_routes_output(peer: Optional[pulumi.Input[str]] = None,
                                                         resource_group_name: Optional[pulumi.Input[str]] = None,
                                                         virtual_network_gateway_name: Optional[pulumi.Input[str]] = None,
                                                         opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetVirtualNetworkGatewayAdvertisedRoutesResult]:
    """
    List of virtual network gateway routes

    :param str peer: The IP address of the peer
    :param str resource_group_name: The name of the resource group.
    :param str virtual_network_gateway_name: The name of the virtual network gateway.
    """
    ...
| [
"noreply@github.com"
] | bpkgoud.noreply@github.com |
51db0a0726ebb48ef9d569a6e69bd653136c424f | 04803c70bb97012b7d500a177ac0240fb2ddbe38 | /blend3_pdep/pdep/network173_1.py | 956994280fffb5c4cd7b584a30d905195c414d55 | [] | no_license | shenghuiqin/chpd | 735e0415f6688d88579fc935459c1b0f53596d1d | 396ba54629036e3f2be0b3fabe09b78c90d56939 | refs/heads/master | 2023-03-01T23:29:02.118150 | 2019-10-05T04:02:23 | 2019-10-05T04:02:23 | 192,084,217 | 0 | 0 | null | 2019-06-18T18:33:13 | 2019-06-15T13:52:28 | HTML | UTF-8 | Python | false | false | 51,831 | py | species(
label = 'S(684)(683)',
structure = SMILES('C=C1C=CC(CC1)O[O]'),
E0 = (99.4946,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([492.5,1135,1000,2750,2816.67,2883.33,2950,3016.67,3083.33,3150,900,933.333,966.667,1000,1033.33,1066.67,1100,2950,3100,1380,975,1025,1650,300,800,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (125.145,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(4400.73,'J/mol'), sigma=(7.16926,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=687.38 K, Pc=27.1 bar (from Joback method)"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.688355,0.0489311,4.89017e-05,-9.72672e-08,3.95319e-11,12106.9,24.8017], Tmin=(100,'K'), Tmax=(996.171,'K')), NASAPolynomial(coeffs=[19.3555,0.0319847,-1.29292e-05,2.56793e-09,-1.93082e-13,5509.43,-79.6317], Tmin=(996.171,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(99.4946,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(428.195,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-OsH) + group(Cs-(Cds-Cds)CsOsH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + ring(Cyclohexane) + radical(ROOJ)"""),
)
species(
label = 'O2(2)(2)',
structure = SMILES('[O][O]'),
E0 = (-8.62178,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1483.7],'cm^-1')),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (31.9988,'amu'),
collisionModel = TransportData(shapeIndex=1, epsilon=(887.157,'J/mol'), sigma=(3.467,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""PrimaryTransportLibrary"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.53764,-0.00122828,5.36759e-06,-4.93128e-09,1.45955e-12,-1037.99,4.6718], Tmin=(100,'K'), Tmax=(1087.71,'K')), NASAPolynomial(coeffs=[3.16427,0.00169454,-8.00335e-07,1.5903e-10,-1.14891e-14,-1048.45,6.08303], Tmin=(1087.71,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-8.62178,'kJ/mol'), Cp0=(29.1007,'J/(mol*K)'), CpInf=(37.4151,'J/(mol*K)'), label="""O2""", comment="""Thermo library: BurkeH2O2"""),
)
species(
label = 'C7H9(682)(681)',
structure = SMILES('C=C1[CH]C=CCC1'),
E0 = (167.661,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,2750,2816.67,2883.33,2950,3016.67,3083.33,3150,900,933.333,966.667,1000,1033.33,1066.67,1100,300,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (93.1464,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(3775.14,'J/mol'), sigma=(6.40398,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=589.67 K, Pc=32.62 bar (from Joback method)"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.44032,0.00354784,0.0001505,-1.92761e-07,7.19358e-11,20248.9,17.5205], Tmin=(100,'K'), Tmax=(966.749,'K')), NASAPolynomial(coeffs=[15.3585,0.0286008,-1.01769e-05,2.03754e-09,-1.60012e-13,14082.7,-63.3387], Tmin=(966.749,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(167.661,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(382.466,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + ring(Cyclohexane) + radical(C=CCJC=C)"""),
)
species(
label = 'HO2(8)(9)',
structure = SMILES('[O]O'),
E0 = (2.67648,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1112.81,1388.53,3298.45],'cm^-1')),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (33.0067,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(892.977,'J/mol'), sigma=(3.458,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=1.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[4.02956,-0.00263985,1.5223e-05,-1.71671e-08,6.26738e-12,322.677,4.84428], Tmin=(100,'K'), Tmax=(923.913,'K')), NASAPolynomial(coeffs=[4.15133,0.00191146,-4.11274e-07,6.34957e-11,-4.86385e-15,83.4208,3.09341], Tmin=(923.913,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(2.67648,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label="""HO2""", comment="""Thermo library: BurkeH2O2"""),
)
species(
label = 'C7H8(690)(689)',
structure = SMILES('C=C1C=CC=CC1'),
E0 = (169.147,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,2950,3100,1380,975,1025,1650,300,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(3784.18,'J/mol'), sigma=(6.18258,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=591.08 K, Pc=36.33 bar (from Joback method)"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.88913,0.0328299,3.37063e-05,-5.81883e-08,2.16785e-11,20431.4,16.995], Tmin=(100,'K'), Tmax=(1043.73,'K')), NASAPolynomial(coeffs=[10.5104,0.0329227,-1.40442e-05,2.72618e-09,-1.97113e-13,16827,-33.6119], Tmin=(1043.73,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(169.147,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(357.522,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + ring(13cyclohexadiene5methylene)"""),
)
species(
label = 'H(3)(3)',
structure = SMILES('[H]'),
E0 = (211.792,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (1.00794,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1205.6,'J/mol'), sigma=(2.05,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,9.24385e-15,-1.3678e-17,6.66185e-21,-1.00107e-24,25472.7,-0.459566], Tmin=(100,'K'), Tmax=(3459.6,'K')), NASAPolynomial(coeffs=[2.5,9.20456e-12,-3.58608e-15,6.15199e-19,-3.92042e-23,25472.7,-0.459566], Tmin=(3459.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(211.792,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""H""", comment="""Thermo library: BurkeH2O2"""),
)
species(
label = 'C=C1C=CC([CH]C1)O[O](3444)',
structure = SMILES('C=C1C=CC([CH]C1)O[O]'),
E0 = (299.91,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([492.5,1135,1000,2950,3100,1380,975,1025,1650,2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,300,800,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (124.137,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.711318,0.0509732,3.29954e-05,-7.81972e-08,3.24103e-11,36208,27.0249], Tmin=(100,'K'), Tmax=(1005.86,'K')), NASAPolynomial(coeffs=[18.6698,0.030587,-1.2702e-05,2.52707e-09,-1.88971e-13,30013.8,-72.5515], Tmin=(1005.86,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(299.91,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(403.252,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-OsH) + group(Cs-(Cds-Cds)CsOsH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + ring(Cyclohexane) + radical(CCJCOOH) + radical(ROOJ)"""),
)
species(
label = 'C=C1C=C[C](CC1)O[O](3445)',
structure = SMILES('C=C1[CH]C=C(CC1)O[O]'),
E0 = (229.602,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([492.5,1135,1000,2950,3100,1380,975,1025,1650,2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,300,800,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (124.137,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.36769,0.0345181,7.2353e-05,-1.14114e-07,4.42221e-11,27730.3,26.2305], Tmin=(100,'K'), Tmax=(989.749,'K')), NASAPolynomial(coeffs=[16.1057,0.0329079,-1.30356e-05,2.55969e-09,-1.91454e-13,21974.4,-59.0594], Tmin=(989.749,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(229.602,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(403.252,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-O2s(Cds-Cd)) + group(O2s-OsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cds-CdsCsCs) + group(Cds-CdsCsOs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + ring(Cyclohexane) + radical(C=CCJC=C) + radical(ROOJ)"""),
)
species(
label = 'C=C1[CH]CC(C=C1)O[O](3446)',
structure = SMILES('C=C1[CH]CC(C=C1)O[O]'),
E0 = (240.607,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([492.5,1135,1000,2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,2950,3100,1380,975,1025,1650,300,800,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (124.137,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.894804,0.0436122,5.81648e-05,-1.06117e-07,4.26386e-11,29072.1,23.6471], Tmin=(100,'K'), Tmax=(992.042,'K')), NASAPolynomial(coeffs=[19.372,0.0295428,-1.19376e-05,2.39877e-09,-1.82488e-13,22432.3,-80.3351], Tmin=(992.042,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(240.607,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(403.252,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-OsH) + group(Cs-(Cds-Cds)CsOsH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + ring(Cyclohexane) + radical(ROOJ) + radical(Allyl_S)"""),
)
species(
label = 'C=C1C=[C]C(CC1)O[O](3447)',
structure = SMILES('C=C1C=[C]C(CC1)O[O]'),
E0 = (337.336,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([492.5,1135,1000,2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,2950,3100,1380,975,1025,1650,300,800,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (124.137,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.651626,0.0535607,2.40819e-05,-6.80731e-08,2.86548e-11,40710.4,25.4081], Tmin=(100,'K'), Tmax=(1012.69,'K')), NASAPolynomial(coeffs=[18.28,0.0312975,-1.31015e-05,2.59251e-09,-1.92411e-13,34711.2,-71.8527], Tmin=(1012.69,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(337.336,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(403.252,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-OsH) + group(Cs-(Cds-Cds)CsOsH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + ring(Cyclohexane) + radical(Cds_S) + radical(ROOJ)"""),
)
species(
label = 'C=C1[C]=CC(CC1)O[O](3448)',
structure = SMILES('C=C1[C]=CC(CC1)O[O]'),
E0 = (298.49,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([492.5,1135,1000,2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,2950,3100,1380,975,1025,1650,300,800,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (124.137,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.596372,0.0556428,1.92062e-05,-6.35927e-08,2.74174e-11,36039.5,25.4215], Tmin=(100,'K'), Tmax=(1004.28,'K')), NASAPolynomial(coeffs=[17.8856,0.0319525,-1.2879e-05,2.494e-09,-1.8312e-13,30288.9,-69.4042], Tmin=(1004.28,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(298.49,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(403.252,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-OsH) + group(Cs-(Cds-Cds)CsOsH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + ring(Cyclohexane) + radical(ROOJ) + radical(C=CJC=C)"""),
)
species(
label = '[CH]=C1C=CC(CC1)O[O](3449)',
structure = SMILES('[CH]=C1C=CC(CC1)O[O]'),
E0 = (346.591,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([492.5,1135,1000,2750,2816.67,2883.33,2950,3016.67,3083.33,3150,900,933.333,966.667,1000,1033.33,1066.67,1100,3120,650,792.5,1650,300,800,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (124.137,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.613031,0.0524721,3.21809e-05,-7.9804e-08,3.35824e-11,41826.7,26.1683], Tmin=(100,'K'), Tmax=(999.278,'K')), NASAPolynomial(coeffs=[19.6096,0.0291286,-1.18819e-05,2.36604e-09,-1.77956e-13,35399,-78.6301], Tmin=(999.278,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(346.591,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(403.252,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-OsH) + group(Cs-(Cds-Cds)CsOsH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + ring(Cyclohexane) + radical(ROOJ) + radical(Cds_P)"""),
)
species(
label = 'CC1=CC=C(CC1)O[O](3452)',
structure = SMILES('CC1=CC=C(CC1)O[O]'),
E0 = (111.258,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([492.5,1135,1000,2750,2800,2850,1350,1500,750,1050,1375,1000,2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,300,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (125.145,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.520665,0.0645255,-2.23618e-05,-1.31162e-08,8.43929e-12,13516.8,28.0713], Tmin=(100,'K'), Tmax=(1063.25,'K')), NASAPolynomial(coeffs=[14.6788,0.0349422,-1.40338e-05,2.60861e-09,-1.83168e-13,9167.57,-47.3961], Tmin=(1063.25,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(111.258,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(424.038,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-O2s(Cds-Cd)) + group(O2s-OsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsOs) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + ring(1,3-Cyclohexadiene) + radical(ROOJ)"""),
)
species(
label = '[O]OC1CCC2=CC1C2(3453)',
structure = SMILES('[O]OC1CCC2=CC1C2'),
E0 = (370.405,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (125.145,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.986014,0.0475846,3.37446e-05,-7.35751e-08,2.99739e-11,44674.5,24.2772], Tmin=(100,'K'), Tmax=(1001.68,'K')), NASAPolynomial(coeffs=[15.5265,0.0343525,-1.3576e-05,2.6005e-09,-1.89614e-13,39512.3,-57.1239], Tmin=(1001.68,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(370.405,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(428.195,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-OsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + polycyclic(s3_4_6_ene_4) + radical(ROOJ)"""),
)
species(
label = '[CH2]C12C=CC(CC1)OO2(3437)',
structure = SMILES('[CH2]C12C=CC(CC1)OO2'),
E0 = (117.079,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (125.145,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.2399,0.0282006,0.000107955,-1.58647e-07,6.03275e-11,14209.5,27.2421], Tmin=(100,'K'), Tmax=(1002.19,'K')), NASAPolynomial(coeffs=[22.0391,0.0268832,-1.23522e-05,2.72404e-09,-2.18161e-13,5937.8,-93.6165], Tmin=(1002.19,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(117.079,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(428.195,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-OsCs) + group(Cs-(Cds-Cds)CsCsOs) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsOsH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + Estimated bicyclic component: polycyclic(s4_6_6_ane) - ring(12dioxane) - ring(Cyclohexane) + ring(36dihydro12dioxin) + ring(Cyclohexene) + radical(CJCOOH)"""),
)
species(
label = 'C=C1C=C[C](CC1)OO(3438)',
structure = SMILES('C=C1[CH]C=C(CC1)OO'),
E0 = (77.5973,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (125.145,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.08397,0.0383282,7.63788e-05,-1.22686e-07,4.7738e-11,9460.69,26.4907], Tmin=(100,'K'), Tmax=(992.223,'K')), NASAPolynomial(coeffs=[17.9777,0.034084,-1.37463e-05,2.73403e-09,-2.06063e-13,2964.67,-70.7209], Tmin=(992.223,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(77.5973,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(424.038,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-O2s(Cds-Cd)) + group(O2s-OsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cds-CdsCsCs) + group(Cds-CdsCsOs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + ring(Cyclohexane) + radical(C=CCJC=C)"""),
)
species(
label = 'C=C1C=CC([CH]C1)OO(3439)',
structure = SMILES('C=C1C=CC([CH]C1)OO'),
E0 = (147.905,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3615,1310,387.5,850,1000,2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,2950,3100,1380,975,1025,1650,300,800,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (125.145,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.428025,0.0547771,3.70486e-05,-8.68131e-08,3.59498e-11,17938.4,27.2836], Tmin=(100,'K'), Tmax=(1006.97,'K')), NASAPolynomial(coeffs=[20.5458,0.0317569,-1.34093e-05,2.70064e-09,-2.03519e-13,11002.3,-84.2353], Tmin=(1006.97,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(147.905,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(424.038,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-OsH) + group(Cs-(Cds-Cds)CsOsH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + ring(Cyclohexane) + radical(CCJCOOH)"""),
)
species(
label = 'C=C1C=[C]C(CC1)OO(3440)',
structure = SMILES('C=C1C=[C]C(CC1)OO'),
E0 = (185.332,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3615,1310,387.5,850,1000,2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,2950,3100,1380,975,1025,1650,300,800,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (125.145,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.368583,0.0573616,2.8147e-05,-7.67058e-08,3.22021e-11,22440.8,25.6659], Tmin=(100,'K'), Tmax=(1013.1,'K')), NASAPolynomial(coeffs=[20.1558,0.0324677,-1.38091e-05,2.76613e-09,-2.06964e-13,15699.7,-83.5356], Tmin=(1013.1,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(185.332,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(424.038,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-OsH) + group(Cs-(Cds-Cds)CsOsH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + ring(Cyclohexane) + radical(Cds_S)"""),
)
species(
label = 'C=C1[CH]CC(C=C1)OO(3441)',
structure = SMILES('C=C1[CH]CC(C=C1)OO'),
E0 = (88.6026,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (125.145,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.611162,0.0474212,6.21958e-05,-1.14697e-07,4.61592e-11,10802.5,23.907], Tmin=(100,'K'), Tmax=(994.221,'K')), NASAPolynomial(coeffs=[21.2448,0.0307177,-1.26477e-05,2.57296e-09,-1.97085e-13,3422.29,-92.0008], Tmin=(994.221,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(88.6026,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(424.038,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-OsH) + group(Cs-(Cds-Cds)CsOsH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + ring(Cyclohexane) + radical(Allyl_S)"""),
)
species(
label = 'C=C1[C]=CC(CC1)OO(3442)',
structure = SMILES('C=C1[C]=CC(CC1)OO'),
E0 = (146.485,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3615,1310,387.5,850,1000,2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,2950,3100,1380,975,1025,1650,300,800,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (125.145,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.312766,0.0594505,2.32457e-05,-7.21903e-08,3.09488e-11,17769.9,25.6813], Tmin=(100,'K'), Tmax=(1005.74,'K')), NASAPolynomial(coeffs=[19.7625,0.0331206,-1.35854e-05,2.66734e-09,-1.97648e-13,11277,-81.0936], Tmin=(1005.74,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(146.485,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(424.038,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-OsH) + group(Cs-(Cds-Cds)CsOsH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + ring(Cyclohexane) + radical(C=CJC=C)"""),
)
species(
label = '[CH]=C1C=CC(CC1)OO(3443)',
structure = SMILES('[CH]=C1C=CC(CC1)OO'),
E0 = (194.586,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3615,1310,387.5,850,1000,3120,650,792.5,1650,2750,2816.67,2883.33,2950,3016.67,3083.33,3150,900,933.333,966.667,1000,1033.33,1066.67,1100,300,800,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (125.145,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.329408,0.0562803,3.62174e-05,-8.83955e-08,3.71101e-11,23557.1,26.4282], Tmin=(100,'K'), Tmax=(1001.04,'K')), NASAPolynomial(coeffs=[21.4852,0.0302989,-1.25895e-05,2.53965e-09,-1.92506e-13,16387.7,-90.3117], Tmin=(1001.04,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(194.586,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(424.038,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-OsH) + group(Cs-(Cds-Cds)CsOsH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + ring(Cyclohexane) + radical(Cds_P)"""),
)
species(
label = 'C=C1CCC2[CH]C1OO2(3450)',
structure = SMILES('C=C1CCC2[CH]C1OO2'),
E0 = (159.754,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (125.145,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.32731,0.034278,7.81332e-05,-1.18987e-07,4.50545e-11,19331.8,23.6805], Tmin=(100,'K'), Tmax=(1006.23,'K')), NASAPolynomial(coeffs=[16.2708,0.0358593,-1.51356e-05,3.03953e-09,-2.28213e-13,13237.1,-63.8474], Tmin=(1006.23,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(159.754,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(432.353,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-OsCs) + group(Cs-CsCsOsH) + group(Cs-(Cds-Cds)CsOsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-CdsCsCs) + group(Cds-CdsHH) + polycyclic(s3_5_6_ane) + radical(CCJCOOH)"""),
)
species(
label = 'S(686)(685)',
structure = SMILES('[CH]1C=C2CCC1OOC2'),
E0 = (12.0126,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2800,2850,2900,2950,3000,3050,3100,3150,900,925,950,975,1000,1025,1050,1075,1100,300,800,800,800,800,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (125.145,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(4312.97,'J/mol'), sigma=(7.20998,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=673.68 K, Pc=26.11 bar (from Joback method)"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.975513,0.101072,-9.60461e-05,4.61804e-08,-8.79394e-12,1630.89,-2.03937], Tmin=(100,'K'), Tmax=(1271,'K')), NASAPolynomial(coeffs=[21.5663,0.0301311,-1.23248e-05,2.26742e-09,-1.56575e-13,-4099.32,-116.196], Tmin=(1271,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(12.0126,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(432.353,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-OsCs) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)OsHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + Estimated bicyclic component: polycyclic(PolycyclicRing) - ring(Cycloheptane) - ring(Cyclohexane) + ring(Cycloheptane) + ring(Cyclohexene) + radical(C=CCJCO)"""),
)
species(
label = 'C=C1[CH]C2OOC2CC1(3451)',
structure = SMILES('C=C1[CH]C2OOC2CC1'),
E0 = (150.205,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (125.145,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.844302,0.0469579,4.95944e-05,-9.20692e-08,3.59787e-11,18198.7,20.1768], Tmin=(100,'K'), Tmax=(1019.07,'K')), NASAPolynomial(coeffs=[17.2162,0.0370871,-1.59368e-05,3.17559e-09,-2.35667e-13,12037.6,-72.9748], Tmin=(1019.07,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(150.205,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(432.353,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-OsCs) + group(Cs-CsCsOsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-CdsCsCs) + group(Cds-CdsHH) + polycyclic(s2_4_6_ane) + radical(C=CCJCO)"""),
)
species(
label = 'C=C1C=C=CCC1(1198)',
structure = SMILES('C=C1C=C=CCC1'),
E0 = (213.151,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,2950,3100,1380,975,1025,1650,300,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.9368,0.02027,9.60388e-05,-1.36392e-07,5.21272e-11,25732.9,14.841], Tmin=(100,'K'), Tmax=(977.964,'K')), NASAPolynomial(coeffs=[15.9544,0.026172,-1.00047e-05,2.01432e-09,-1.55799e-13,19967.2,-67.9339], Tmin=(977.964,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(213.151,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(357.522,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(Cdd-CdsCds) + ring(Cyclohexane)"""),
)
species(
label = 'O(4)(4)',
structure = SMILES('[O]'),
E0 = (243.005,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (15.9994,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(665.16,'J/mol'), sigma=(2.75,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,9.24385e-15,-1.3678e-17,6.66185e-21,-1.00107e-24,29226.7,5.11107], Tmin=(100,'K'), Tmax=(3459.6,'K')), NASAPolynomial(coeffs=[2.5,9.20456e-12,-3.58608e-15,6.15199e-19,-3.92042e-23,29226.7,5.11107], Tmin=(3459.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(243.005,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""O""", comment="""Thermo library: BurkeH2O2"""),
)
species(
label = 'C=C1C=CC([O])CC1(3454)',
structure = SMILES('C=C1C=CC([O])CC1'),
E0 = (106.346,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2816.67,2883.33,2950,3016.67,3083.33,3150,900,933.333,966.667,1000,1033.33,1066.67,1100,2950,3100,1380,975,1025,1650,300,800,800,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (109.146,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.29759,0.0312133,9.19019e-05,-1.41376e-07,5.53703e-11,12913,20.9012], Tmin=(100,'K'), Tmax=(977.918,'K')), NASAPolynomial(coeffs=[19.4788,0.0273743,-1.03907e-05,2.10784e-09,-1.6441e-13,5984.68,-83.6495], Tmin=(977.918,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(106.346,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(407.409,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsOsH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + ring(Cyclohexane) + radical(CC(C)OJ)"""),
)
species(
label = 'Ne',
structure = SMILES('[Ne]'),
E0 = (-6.19738,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (20.1797,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1235.53,'J/mol'), sigma=(3.758e-10,'m'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with fixed Lennard Jones Parameters. This is the fallback method! Try improving transport databases!"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(-6.19738,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""Ne""", comment="""Thermo library: primaryThermoLibrary"""),
)
species(
label = 'N2',
structure = SMILES('N#N'),
E0 = (-8.69489,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (28.0135,'amu'),
collisionModel = TransportData(shapeIndex=1, epsilon=(810.913,'J/mol'), sigma=(3.621,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(1.76,'angstroms^3'), rotrelaxcollnum=4.0, comment="""PrimaryTransportLibrary"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.61263,-0.00100893,2.49898e-06,-1.43376e-09,2.58636e-13,-1051.1,2.6527], Tmin=(100,'K'), Tmax=(1817.04,'K')), NASAPolynomial(coeffs=[2.9759,0.00164141,-7.19722e-07,1.25378e-10,-7.91526e-15,-1025.84,5.53757], Tmin=(1817.04,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-8.69489,'kJ/mol'), Cp0=(29.1007,'J/(mol*K)'), CpInf=(37.4151,'J/(mol*K)'), label="""N2""", comment="""Thermo library: BurkeH2O2"""),
)
species(
label = 'Ar(8)',
structure = SMILES('[Ar]'),
E0 = (-6.19426,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (39.348,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1134.93,'J/mol'), sigma=(3.33,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,9.24385e-15,-1.3678e-17,6.66185e-21,-1.00107e-24,-745,4.3663], Tmin=(100,'K'), Tmax=(3459.6,'K')), NASAPolynomial(coeffs=[2.5,9.20456e-12,-3.58608e-15,6.15199e-19,-3.92042e-23,-745,4.3663], Tmin=(3459.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-6.19426,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""Ar""", comment="""Thermo library: BurkeH2O2"""),
)
transitionState(
label = 'TS1',
E0 = (164.803,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS2',
E0 = (511.702,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS3',
E0 = (441.913,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS4',
E0 = (458.163,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS5',
E0 = (549.129,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS6',
E0 = (514.479,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS7',
E0 = (558.383,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS8',
E0 = (274.973,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS9',
E0 = (370.405,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS10',
E0 = (119.717,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS11',
E0 = (164.472,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS12',
E0 = (232.003,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS13',
E0 = (286.509,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS14',
E0 = (148.657,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS15',
E0 = (179.526,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS16',
E0 = (346.107,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS17',
E0 = (180.246,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS18',
E0 = (161.665,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS19',
E0 = (150.205,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS20',
E0 = (224.178,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS21',
E0 = (278.152,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS22',
E0 = (349.351,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
reaction(
label = 'reaction83',
reactants = ['O2(2)(2)', 'C7H9(682)(681)'],
products = ['S(684)(683)'],
transitionState = 'TS1',
kinetics = Arrhenius(A=(5.42928e+07,'m^3/(mol*s)'), n=0.107721, Ea=(5.76381,'kJ/mol'), T0=(1,'K'), Tmin=(303.03,'K'), Tmax=(2000,'K'), comment="""Estimated using template [C_rad/H/CdCs;Y_rad] for rate rule [C_rad/H/CdCs;O2_birad]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: R_Recombination"""),
)
reaction(
label = 'reaction84',
reactants = ['H(3)(3)', 'C=C1C=CC([CH]C1)O[O](3444)'],
products = ['S(684)(683)'],
transitionState = 'TS2',
kinetics = Arrhenius(A=(2e+13,'cm^3/(mol*s)','*|/',3.16), n=0, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(2000,'K'), comment="""From training reaction 59 used for H_rad;C_rad/H/NonDeC
Exact match found for rate rule [C_rad/H/NonDeC;H_rad]
Euclidian distance = 0
family: R_Recombination"""),
)
reaction(
label = 'reaction85',
reactants = ['H(3)(3)', 'C=C1C=C[C](CC1)O[O](3445)'],
products = ['S(684)(683)'],
transitionState = 'TS3',
kinetics = Arrhenius(A=(2.92e+13,'cm^3/(mol*s)'), n=0.18, Ea=(0.518816,'kJ/mol'), T0=(1,'K'), Tmin=(200,'K'), Tmax=(2000,'K'), comment="""Estimated using template [C_rad/OneDe;H_rad] for rate rule [C_rad/OneDeO;H_rad]
Euclidian distance = 1.0
family: R_Recombination"""),
)
reaction(
label = 'reaction86',
reactants = ['H(3)(3)', 'C=C1[CH]CC(C=C1)O[O](3446)'],
products = ['S(684)(683)'],
transitionState = 'TS4',
kinetics = Arrhenius(A=(2.71464e+07,'m^3/(mol*s)'), n=0.107721, Ea=(5.76381,'kJ/mol'), T0=(1,'K'), Tmin=(303.03,'K'), Tmax=(2000,'K'), comment="""From training reaction 36 used for C_rad/H/CdCs;H_rad
Exact match found for rate rule [C_rad/H/CdCs;H_rad]
Euclidian distance = 0
family: R_Recombination"""),
)
reaction(
label = 'reaction87',
reactants = ['H(3)(3)', 'C=C1C=[C]C(CC1)O[O](3447)'],
products = ['S(684)(683)'],
transitionState = 'TS5',
kinetics = Arrhenius(A=(1e+13,'cm^3/(mol*s)'), n=0, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 40 used for Cd_rad/NonDe;H_rad
Exact match found for rate rule [Cd_rad/NonDe;H_rad]
Euclidian distance = 0
family: R_Recombination"""),
)
reaction(
label = 'reaction88',
reactants = ['H(3)(3)', 'C=C1[C]=CC(CC1)O[O](3448)'],
products = ['S(684)(683)'],
transitionState = 'TS6',
kinetics = Arrhenius(A=(6.117e+14,'cm^3/(mol*s)'), n=-0.152, Ea=(4.19655,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 49 used for Cd_rad/Cd;H_rad
Exact match found for rate rule [Cd_rad/Cd;H_rad]
Euclidian distance = 0
family: R_Recombination"""),
)
reaction(
label = 'reaction89',
reactants = ['H(3)(3)', '[CH]=C1C=CC(CC1)O[O](3449)'],
products = ['S(684)(683)'],
transitionState = 'TS7',
kinetics = Arrhenius(A=(1.21e+14,'cm^3/(mol*s)','+|-',4.82e+13), n=0, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(298,'K'), comment="""From training reaction 60 used for H_rad;Cd_pri_rad
Exact match found for rate rule [Cd_pri_rad;H_rad]
Euclidian distance = 0
family: R_Recombination"""),
)
reaction(
label = 'reaction92',
reactants = ['CC1=CC=C(CC1)O[O](3452)'],
products = ['S(684)(683)'],
transitionState = 'TS8',
kinetics = Arrhenius(A=(1.02873e+09,'s^-1'), n=1.23767, Ea=(163.714,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [1_3_pentadiene;CH_end;unsaturated_end] for rate rule [1_3_pentadiene;CH3_1;unsaturated_end]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 3.0
family: Intra_ene_reaction"""),
)
reaction(
label = 'reaction95',
reactants = ['S(684)(683)'],
products = ['[O]OC1CCC2=CC1C2(3453)'],
transitionState = 'TS9',
kinetics = Arrhenius(A=(4.99998e+11,'s^-1'), n=0.0559095, Ea=(270.911,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [1,3-butadiene_backbone;C=C_1;C=C_2] for rate rule [1,3-butadiene_backbone;CdH2_1;CdH(C)_2]
Euclidian distance = 1.41421356237
family: Intra_2+2_cycloaddition_Cd
Ea raised from 270.3 to 270.9 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction76',
reactants = ['S(684)(683)'],
products = ['[CH2]C12C=CC(CC1)OO2(3437)'],
transitionState = 'TS10',
kinetics = Arrhenius(A=(19155.5,'s^-1'), n=1.402, Ea=(20.2227,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R7_SSSS_D;doublebond_intra_2H;radadd_intra] for rate rule [R7_SSSS_D;doublebond_intra_2H_secDe;radadd_intra_O]
Euclidian distance = 1.41421356237
family: Intra_R_Add_Exocyclic"""),
)
reaction(
label = 'reaction77',
reactants = ['S(684)(683)'],
products = ['C=C1C=C[C](CC1)OO(3438)'],
transitionState = 'TS11',
kinetics = Arrhenius(A=(2.3012e-19,'s^-1'), n=9.03667, Ea=(64.9775,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R3H_SS;Y_rad_out;Cs_H_out_OneDe] for rate rule [R3H_SS_O;O_rad_out;Cs_H_out_(CdCdCd)]
Euclidian distance = 2.44948974278
family: intra_H_migration"""),
)
reaction(
label = 'reaction78',
reactants = ['C=C1C=CC([CH]C1)OO(3439)'],
products = ['S(684)(683)'],
transitionState = 'TS12',
kinetics = Arrhenius(A=(2960,'s^-1'), n=2.11, Ea=(84.0984,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(2500,'K'), comment="""From training reaction 323 used for R4H_SSS;C_rad_out_H/NonDeC;O_H_out
Exact match found for rate rule [R4H_SSS;C_rad_out_H/NonDeC;O_H_out]
Euclidian distance = 0
family: intra_H_migration"""),
)
reaction(
label = 'reaction79',
reactants = ['C=C1C=[C]C(CC1)OO(3440)'],
products = ['S(684)(683)'],
transitionState = 'TS13',
kinetics = Arrhenius(A=(1.286e+08,'s^-1'), n=1.323, Ea=(101.177,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4H_RSR;Cd_rad_out_Cd;XH_out] for rate rule [R4H_SSS;Cd_rad_out_Cd;O_H_out]
Euclidian distance = 2.2360679775
family: intra_H_migration"""),
)
reaction(
label = 'reaction80',
reactants = ['S(684)(683)'],
products = ['C=C1[CH]CC(C=C1)OO(3441)'],
transitionState = 'TS14',
kinetics = Arrhenius(A=(12044.4,'s^-1'), n=1.9, Ea=(49.162,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5H_SSSS;Y_rad_out;Cs_H_out_H/Cd] for rate rule [R5H_SSSS_OCC;O_rad_out;Cs_H_out_H/Cd]
Euclidian distance = 1.41421356237
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction81',
reactants = ['C=C1[C]=CC(CC1)OO(3442)'],
products = ['S(684)(683)'],
transitionState = 'TS15',
kinetics = Arrhenius(A=(136000,'s^-1'), n=1.9199, Ea=(33.0402,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5H;Cd_rad_out_single;XH_out] for rate rule [R5H_DSSS;Cd_rad_out_singleDe_Cd;O_H_out]
Euclidian distance = 3.74165738677
family: intra_H_migration"""),
)
reaction(
label = 'reaction82',
reactants = ['[CH]=C1C=CC(CC1)OO(3443)'],
products = ['S(684)(683)'],
transitionState = 'TS16',
kinetics = Arrhenius(A=(1.86943e+06,'s^-1'), n=1.85754, Ea=(151.521,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [RnH;Cd_rad_out_singleH;XH_out] for rate rule [R7H;Cd_rad_out_singleH;O_H_out]
Euclidian distance = 2.2360679775
family: intra_H_migration"""),
)
reaction(
label = 'reaction90',
reactants = ['S(684)(683)'],
products = ['C=C1CCC2[CH]C1OO2(3450)'],
transitionState = 'TS17',
kinetics = Arrhenius(A=(9.9e+10,'s^-1'), n=0.06, Ea=(80.7512,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Rn2cx_beta;doublebond_intra_pri_HCd;radadd_intra] for rate rule [Rn2c6_beta_short;doublebond_intra_pri_HCd;radadd_intra_O]
Euclidian distance = 1.41421356237
family: Intra_R_Add_Endocyclic"""),
)
reaction(
label = 'reaction74',
reactants = ['S(684)(683)'],
products = ['S(686)(685)'],
transitionState = 'TS18',
kinetics = Arrhenius(A=(8.62196e+06,'s^-1'), n=0.867572, Ea=(62.1704,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R7_linear;doublebond_intra;radadd_intra] for rate rule [R7_linear;doublebond_intra_secDe_2H;radadd_intra_O]
Euclidian distance = 2.2360679775
family: Intra_R_Add_Endocyclic"""),
)
reaction(
label = 'reaction91',
reactants = ['S(684)(683)'],
products = ['C=C1[CH]C2OOC2CC1(3451)'],
transitionState = 'TS19',
kinetics = Arrhenius(A=(1.99832e+10,'s^-1'), n=0.37247, Ea=(50.7104,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R6plus;doublebond_intra_pri;radadd_intra] for rate rule [Rn2c6_alpha_long;doublebond_intra_pri;radadd_intra_O]
Euclidian distance = 3.16227766017
family: Intra_R_Add_Endocyclic
Ea raised from 50.4 to 50.7 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction93',
reactants = ['S(684)(683)'],
products = ['HO2(8)(9)', 'C7H8(690)(689)'],
transitionState = 'TS20',
kinetics = Arrhenius(A=(8.00406e+10,'s^-1'), n=0.563333, Ea=(124.683,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R2OO_HNd] for rate rule [R2OO_HNd_HDe]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: HO2_Elimination_from_PeroxyRadical"""),
)
reaction(
label = 'reaction94',
reactants = ['S(684)(683)'],
products = ['HO2(8)(9)', 'C=C1C=C=CCC1(1198)'],
transitionState = 'TS21',
kinetics = Arrhenius(A=(3.63e+09,'s^-1'), n=1.11, Ea=(178.657,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Estimated using an average for rate rule [R2OO_0H]
Euclidian distance = 0
family: HO2_Elimination_from_PeroxyRadical"""),
)
reaction(
label = 'reaction96',
reactants = ['O(4)(4)', 'C=C1C=CC([O])CC1(3454)'],
products = ['S(684)(683)'],
transitionState = 'TS22',
kinetics = Arrhenius(A=(1355.7,'m^3/(mol*s)'), n=1.40819, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(303.03,'K'), Tmax=(2000,'K'), comment="""From training reaction 3 used for O_rad/NonDe;O_birad
Exact match found for rate rule [O_rad/NonDe;O_birad]
Euclidian distance = 0
family: Birad_R_Recombination
Ea raised from -12.0 to 0 kJ/mol."""),
)
# Pressure-dependent reaction network (appears to be RMG-Py / Arkane input).
# Single-isomer network '173' linking the adduct S(684)(683) to its two
# bimolecular channels; the bath gas is an equimolar Ne/N2/Ar mixture.
network(
    label = '173',
    isomers = [
        'S(684)(683)',
    ],
    reactants = [
        ('O2(2)(2)', 'C7H9(682)(681)'),
        ('HO2(8)(9)', 'C7H8(690)(689)'),
    ],
    bathGas = {
        'Ne': 0.333333,
        'N2': 0.333333,
        'Ar(8)': 0.333333,
    },
)
# Solver settings for the pressure-dependence calculation: 8 temperatures in
# 300-2000 K and 5 pressures in 0.01-100 bar, solved with the modified
# strong-collision method and fitted to a 6x4 Chebyshev interpolation model.
pressureDependence(
    label = '173',
    Tmin = (300,'K'),
    Tmax = (2000,'K'),
    Tcount = 8,
    Tlist = ([302.47,323.145,369.86,455.987,609.649,885.262,1353.64,1896.74],'K'),
    Pmin = (0.01,'bar'),
    Pmax = (100,'bar'),
    Pcount = 5,
    Plist = ([0.0125282,0.0667467,1,14.982,79.8202],'bar'),
    maximumGrainSize = (0.5,'kcal/mol'),
    minimumGrainCount = 250,
    method = 'modified strong collision',
    interpolationModel = ('Chebyshev', 6, 4),
    activeKRotor = True,
    activeJRotor = True,
    rmgmode = True,
)
| [
"qin.she@husky.neu.edu"
] | qin.she@husky.neu.edu |
9f935df7a693a88e5ff198c8cdeb82c876498221 | 46404c77e04907225475e9d8be6e0fd33227c0b1 | /max value of exp.py | 97a9e0dd3173d1d935cda977191f6d3427639305 | [] | no_license | govardhananprabhu/DS-task- | 84b46e275406fde2d56c301fd1b425b256b29064 | bf54f3d527f52f61fefc241f955072f5ed9a6558 | refs/heads/master | 2023-01-16T07:41:27.064836 | 2020-11-27T11:52:50 | 2020-11-27T11:52:50 | 272,928,074 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,589 | py | """
Given an algebraic expression of the form (x1 + x2 + x3 + . . . + xn) * (y1 + y2 + . . . + ym) and
(n + m) integers. Find the maximum value of the expression using the given
integers.
Constraint :
n <= 50
m <= 50
-50 <= x1, x2, .. xn <= 50
H 6
T 2000
Tag cisco mathematics
In des
First line contains 2 space separated integers n,m, denotes the count of integers.
Second line contains n+m space separated integers.
Ot des
Print the max value
2 2
1 2 3 4
25
3 1
1 2 3 4
24
5 4
1 3 2 5 4 88 12 21 11
4982
1 1
11 10
110
3 3
1 4 22 1 33 2
980
Exp
The expression is (x1 + x2) * (y1 + y2) and
the given integers are 1, 2, 3 and 4. Then
maximum value is (1 + 4) * (2 + 3) = 25
Hint
A simple solution is to consider all possible combinations of n numbers and remaining m numbers and calculating their values, from which maximum value can be derived.
"""
def MaxValues(arr, n, m) :
    """Print the maximum of (sum of a group of n values) * (sum of the
    remaining m values) over all ways to split ``arr`` into such groups.

    Subset-sum DP: dp[k][s] is truthy when some k of the processed values
    reach shifted sum s.  Values are shifted by +MAX so that negative
    inputs (down to -MAX) index the table safely.

    Fixes over the original:
      * the caller's list is no longer mutated in place,
      * the builtin ``sum`` is no longer shadowed,
      * the inner DP loop is bounded so dp[k][s + shifted] cannot index
        past the end of the row.

    Args:
        arr: list of n + m integers, each in [-50, 50] (left unmodified).
        n, m: sizes of the two groups (each <= 50).
    """
    INF = 1000000000
    MAX = 50
    total = sum(arr)
    # Shift every value into [0, 2*MAX] without touching the caller's list.
    shifted = [v + MAX for v in arr]
    dp = [[0 for x in range(MAX * MAX + 1)]
          for y in range(MAX + 1)]
    dp[0][0] = 1
    for i in range(0, (n + m)) :
        # Iterate k downwards so each value is used at most once.
        for k in range(min(n, i + 1), 0, -1) :
            # Bound s so s + shifted[i] stays inside the row.
            for s in range(0, MAX * MAX + 1 - shifted[i]) :
                if (dp[k - 1][s]) :
                    dp[k][s + shifted[i]] = 1
    max_value = -1 * INF
    for s in range(0, MAX * MAX + 1) :
        if (dp[n][s]) :
            group_sum = s - MAX * n          # undo the +MAX-per-element shift
            max_value = max(max_value, group_sum * (total - group_sum))
    print(max_value)
# Read the group sizes, then the n + m integers, and solve.
n, m = (int(tok) for tok in input().split())
values = [int(tok) for tok in input().split()]
MaxValues(values, n, m)
| [
"noreply@github.com"
] | govardhananprabhu.noreply@github.com |
0b9eebfc0db17524b69f3646238b25cf55e6c715 | 714a8942a8a761d4ff1aa5cc38f68cd414da295d | /django_restful/wsgi.py | 5f86f0839de9ded1feb6f165af75437128cbd607 | [] | no_license | eliblurr/django-restful | 9e9a1188242439a2486a18a3d6f8dab3f9be4952 | 0fdf688d41ad32a5a63cf34680e6a96c572e5337 | refs/heads/main | 2023-06-04T08:18:25.286212 | 2021-06-23T20:36:39 | 2021-06-23T20:36:39 | 379,537,949 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | """
WSGI config for django_restful project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings before building the application.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_restful.settings')
# Module-level WSGI callable that servers (gunicorn, uWSGI, ...) import.
application = get_wsgi_application()
| [
"segbawuel@aiti-kace.com"
] | segbawuel@aiti-kace.com |
3ef7f37ef8d1957487abd9ace0dad6904448428b | edd8ad3dcb6ee9b019c999b712f8ee0c468e2b81 | /Python 300/09. Function/227.py | 42218bd6f628a285d7163e3e929121048f5fcec1 | [] | no_license | narinn-star/Python | 575cba200de35b9edf3832c4e41ccce657075751 | 14eba211cd3a9e9708a30073ba5b31d21d39eeef | refs/heads/master | 2023-05-25T22:57:26.079294 | 2021-06-07T15:29:39 | 2021-06-07T15:29:39 | 331,647,462 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | #functions _ 함수 정의
def print_mxn(line, n):
    """Print ``line`` in rows of at most ``n`` characters.

    Fixes the original off-by-one: when len(line) was an exact multiple
    of n, the old ``range(num + 1)`` loop printed a trailing blank line.
    """
    for start in range(0, len(line), n):
        print(line[start:start + n])
print_mxn("가나다라마바사아자차", 3) | [
"skfls2618@naver.com"
] | skfls2618@naver.com |
f146ecaa2404f714b15e20fe8f193a13c2fd7061 | c6ed9aa97166d4778b89321b580af80c543bacc9 | /randoms/kthlargest.py | 8869a6258042894d7e85fa2c7e867ca9be147be2 | [] | no_license | bradyz/sandbox | 381bcaf2f3719dee142a00858f7062aeff98d1ab | ff90335b918886d5b5956c6c6546dbfde5e7f5b3 | refs/heads/master | 2021-01-23T09:03:54.697325 | 2018-02-27T20:47:48 | 2018-02-27T20:47:48 | 21,292,856 | 10 | 0 | null | 2015-09-03T16:53:15 | 2014-06-28T00:29:18 | Python | UTF-8 | Python | false | false | 1,059 | py | from random import randrange
def kth(a, k):
    """Partition ``a`` in place until its last ``k`` slots hold the k
    largest elements, then return that tail slice.

    Emits the same partition trace lines as the original implementation.
    NOTE(review): the +1/-1 cursor walk is unusual for quickselect;
    behaviour is reproduced exactly, not vouched for on all inputs.
    """
    def partition(lo, hi):
        # Single partitioning pass using a[lo] as the pivot value.
        pivot_val = a[lo]
        i, j = lo, hi
        while i < j:
            while i < j and a[i] <= pivot_val:
                i += 1
            while a[j] > pivot_val:
                j -= 1
            if i < j:
                a[i], a[j] = a[j], a[i]
        a[lo] = a[j]
        a[j] = pivot_val
        return j

    size = len(a)
    cursor = 0
    while cursor != size - k:
        spot = partition(cursor, size - 1)
        print("tmp: " + str(spot) + " val: " + str(a[spot]))
        cursor = cursor - 1 if spot > size - k + 1 else cursor + 1
    print(a)
    return a[-k:]
if __name__ == "__main__":
    # Demo driver: find the 2 largest of 10 random values in [0, 100).
    arr = [randrange(100) for _ in range(10)]
    el = 2
    print(str(el) + " elements")
    print(arr)
    print(kth(arr, el))
    # Old stdin-driven harness (Python 2 era — note raw_input); kept disabled.
    # t = int(input())
    # for _ in range(t):
    #     el = int(input())
    #     arr = [int(val) for val in raw_input().split()]
    #     print(el)
    #     print(arr)
    #     print(kth(arr, el))
| [
"brady.zhou@utexas.edu"
] | brady.zhou@utexas.edu |
0ce72dc9c9ccb2c0f733aab21a0e31e378fc45d4 | 2827d7a837eb29c3cb07793ab6d3d5a753e18669 | /alipay/aop/api/request/AlipayOpenPublicMessageContentCreateRequest.py | 72e8bc46aeb5bc88d6817b2321843ee2afd36b85 | [
"Apache-2.0"
] | permissive | shaobenbin/alipay-sdk-python | 22e809b8f5096bec57d2bb25414f64bdc87fa8b3 | 5232ad74dff2e8a6e0e7646ab3318feefa07a37d | refs/heads/master | 2020-03-21T04:51:39.935692 | 2018-06-21T07:03:31 | 2018-06-21T07:03:31 | 138,131,022 | 0 | 0 | null | 2018-06-21T06:50:24 | 2018-06-21T06:50:24 | null | UTF-8 | Python | false | false | 4,016 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayOpenPublicMessageContentCreateModel import AlipayOpenPublicMessageContentCreateModel
class AlipayOpenPublicMessageContentCreateRequest(object):
    """Request object for the Alipay gateway method
    'alipay.open.public.message.content.create'.

    Holds the business payload (``biz_model``/``biz_content``) together
    with the common gateway parameters and renders them into the flat
    dict that the gateway client signs and sends.
    """

    def __init__(self, biz_model=None):
        self._biz_model = biz_model
        self._biz_content = None
        self._version = "1.0"
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        self._notify_url = None
        self._return_url = None
        self._udf_params = None
        self._need_encrypt = False

    @property
    def biz_model(self):
        return self._biz_model

    @biz_model.setter
    def biz_model(self, value):
        self._biz_model = value

    @property
    def biz_content(self):
        return self._biz_content

    @biz_content.setter
    def biz_content(self, value):
        # Accept either a ready model instance or a plain dict to convert.
        if isinstance(value, AlipayOpenPublicMessageContentCreateModel):
            self._biz_content = value
        else:
            self._biz_content = AlipayOpenPublicMessageContentCreateModel.from_alipay_dict(value)

    @property
    def version(self):
        return self._version

    @version.setter
    def version(self, value):
        self._version = value

    @property
    def terminal_type(self):
        return self._terminal_type

    @terminal_type.setter
    def terminal_type(self, value):
        self._terminal_type = value

    @property
    def terminal_info(self):
        return self._terminal_info

    @terminal_info.setter
    def terminal_info(self, value):
        self._terminal_info = value

    @property
    def prod_code(self):
        return self._prod_code

    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value

    @property
    def notify_url(self):
        return self._notify_url

    @notify_url.setter
    def notify_url(self, value):
        self._notify_url = value

    @property
    def return_url(self):
        # BUG FIX: this getter previously returned self._notify_url
        # (copy/paste error), so reading return_url gave the notify URL
        # and get_params() sent the wrong value for 'return_url'.
        return self._return_url

    @return_url.setter
    def return_url(self, value):
        self._return_url = value

    @property
    def udf_params(self):
        return self._udf_params

    @udf_params.setter
    def udf_params(self, value):
        # Non-dict values are silently ignored (original contract).
        if not isinstance(value, dict):
            return
        self._udf_params = value

    @property
    def need_encrypt(self):
        return self._need_encrypt

    @need_encrypt.setter
    def need_encrypt(self, value):
        self._need_encrypt = value

    def add_other_text_param(self, key, value):
        """Attach one extra text parameter, creating the dict on first use."""
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value

    def get_params(self):
        """Render all populated fields into the flat gateway-parameter dict."""
        params = dict()
        params[P_METHOD] = 'alipay.open.public.message.content.create'
        params[P_VERSION] = self.version
        if self.biz_model:
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        if self.biz_content:
            if hasattr(self.biz_content, 'to_alipay_dict'):
                params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
            else:
                params['biz_content'] = self.biz_content
        if self.terminal_type:
            params['terminal_type'] = self.terminal_type
        if self.terminal_info:
            params['terminal_info'] = self.terminal_info
        if self.prod_code:
            params['prod_code'] = self.prod_code
        if self.notify_url:
            params['notify_url'] = self.notify_url
        if self.return_url:
            params['return_url'] = self.return_url
        if self.udf_params:
            params.update(self.udf_params)
        return params

    def get_multipart_params(self):
        """No file fields on this request; always returns an empty dict."""
        multipart_params = dict()
        return multipart_params
| [
"liuqun.lq@alibaba-inc.com"
] | liuqun.lq@alibaba-inc.com |
dc89937d05510bc33d593090df17b6f5fabdfb40 | ef0d8fd55fbdb526e20d6c2b05e601f1d86587c5 | /frappe/utils/bench_helper.py | b920347f7af0e8d5ce3bb5003349a9293c47e97f | [
"MIT"
] | permissive | indictranstech/v4_frappe | 8976e84c14346196b8895ad6274740dca7fd6504 | dba708c8aa83f503b9f4a264850307111a2b5f19 | refs/heads/master | 2021-09-26T12:26:29.994294 | 2018-10-30T06:09:36 | 2018-10-30T06:09:36 | 103,262,823 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 818 | py | from __future__ import unicode_literals
import click
import frappe
import importlib
def main():
	"""CLI entry point: assemble every app's command group and dispatch."""
	root = click.Group(commands=get_app_groups())
	root()
def get_cli_options():
	"""Placeholder for gathering global CLI options; not implemented yet."""
	pass
def get_app_groups():
	"""Return {app_name: click.Group} for every app that exposes commands."""
	pairs = ((name, get_app_group(name)) for name in get_apps())
	return {name: group for name, group in pairs if group}
def get_app_group(app):
	"""Wrap *app*'s commands in a click.Group; None when it has no commands."""
	commands = get_app_commands(app)
	return click.Group(name=app, commands=commands) if commands else None
def get_app_commands(app):
	"""Return the commands declared in ``<app>.commands``.

	The module's ``commands`` attribute is expected to be an iterable of
	click commands; the result maps command name -> command object.
	Returns an empty dict when the app has no importable commands module
	(previously an empty list, which made the return type inconsistent;
	both are falsy, so callers are unaffected).
	"""
	try:
		app_command_module = importlib.import_module(app + '.commands')
	except ImportError:
		return {}
	return {command.name: command
		for command in getattr(app_command_module, 'commands', [])}
def get_apps():
	# All installed apps for the bench, excluding frappe-internal apps.
	# sites_path='.' assumes the process is started from the bench root.
	return frappe.get_all_apps(with_internal_apps=False, sites_path='.')
if __name__ == "__main__":
	main()
| [
"sagarshiragawakar@gmail.com"
] | sagarshiragawakar@gmail.com |
263cb4742e4e6ddcd2bb7b0b9ffff06d93e4e06d | 3c6b3b0a92e5a290ba69d0f73af51ac82aff3509 | /assignments/development/chess-top-100-p2.py | a0f31d9eb1b9a23a30afa30fb6798ba02ba27b67 | [] | no_license | sarae17/2019-T-111-PROG | ba6c6db7075acba16bbcd23e4c0d3db6e2bb374f | 017287b3300ec4fe809bfc81fee856ffb17b4800 | refs/heads/master | 2020-09-10T14:36:53.715479 | 2019-11-13T13:41:04 | 2019-11-13T13:41:04 | 221,722,173 | 1 | 0 | null | 2019-11-14T14:54:15 | 2019-11-14T14:54:14 | null | UTF-8 | Python | false | false | 3,092 | py | # The following constants indicate the position of the respective
# fields in the tuple stored as the value for the key in the players dictionary
RANK = 0     # world-ranking position
COUNTRY = 1  # three-letter federation/country code
RATING = 2   # Elo rating
BYEAR = 3    # birth year
def open_file(filename):
    """Return a read-mode text stream for *filename*, or None if missing."""
    try:
        return open(filename, 'r')
    except FileNotFoundError:
        return None
def create_players_dict(file_stream):
    """Parse semicolon-separated player records into a dictionary.

    Each input line has the form ``rank;last, first;country;rating;byear``.
    Returns {"First Last": (rank, country, rating, byear)}.
    """
    players = {}
    for record in file_stream:
        rank, full_name, country, rating, birth_year = record.split(';')
        # The name field itself is "last, first"; strip padding everywhere.
        last, first = (part.strip() for part in full_name.split(','))
        players["{} {}".format(first, last)] = (
            int(rank), country.strip(), int(rating), int(birth_year))
    return players
def create_dict_with_key(dict_players, attribute_key):
    """Invert a players dictionary, grouping player names by one attribute.

    The chosen attribute (e.g. country or birth year) becomes the key;
    the value is the list of player names sharing it, in insertion order.
    """
    grouped = {}
    for name, data in dict_players.items():
        grouped.setdefault(data[attribute_key], []).append(name)
    return grouped
def get_average_rating(players, dict_players):
    """Return the mean rating of the given player names."""
    total = sum(dict_players[name][RATING] for name in players)
    return total / len(players)
def print_sorted(the_dict, dict_players):
    """Print each key (count, average rating) and its players, sorted by key."""
    for key, players in sorted(the_dict.items()):
        avg = get_average_rating(players, dict_players)
        print("{} ({}) ({:.1f}):".format(key, len(players), avg))
        for name in players:
            print("{:>40}{:>10d}".format(name, dict_players[name][RATING]))
def print_header(header_str):
    """Print *header_str* underlined with one dash per character."""
    print(header_str)
    print('-' * len(header_str))
# The main program starts here
# Prompt for the data file; silently do nothing if it cannot be opened.
filename = input("Enter filename: ")
file_stream = open_file(filename)
if file_stream:
    # name -> (rank, country, rating, byear)
    dict_players = create_players_dict(file_stream)
    # Invert on country (COUNTRY) and on birth year (BYEAR).
    dict_countries = create_dict_with_key(dict_players, COUNTRY)
    dict_years = create_dict_with_key(dict_players, BYEAR)
    print_header("Players by country:")
    print_sorted(dict_countries, dict_players)
    print()
    print_header("Players by birth year:")
print_sorted(dict_years, dict_players) | [
"hrafnl@gmail.com"
] | hrafnl@gmail.com |
3a8d3609a8249080335168e1e422f79f390a3655 | 26762585d08aa774af9f104472c97a8c7a9df181 | /generators/old/gridgen8.py | c386a48438966ef12211b7014278750253ae8e3f | [] | no_license | OxfordSKA/SKA1-low-layouts | 379fbe5c056dc73706b1073f09e485880ecfa180 | 49e3ba2af4a447be38af03dde1d11898e3f8300b | refs/heads/master | 2021-01-17T17:10:41.469929 | 2016-08-12T10:48:24 | 2016-08-12T10:48:24 | 47,823,977 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,177 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import numpy
from numpy.random import rand, seed
from math import ceil, log, exp, floor
import matplotlib.pyplot as pyplot
def grid_position(x, y, scale, r):
    """Return the sector-grid cell indices for a point.

    The point lies in a circle centred on the origin; adding the radius
    ``r`` shifts both coordinates into [0, 2r) before scaling to cell
    units and flooring to integer indices.
    """
    cell_x = int(floor(scale * (x + r)))
    cell_y = int(floor(scale * (y + r)))
    return cell_x, cell_y
def grid_position_2(x, y, scale, grid_size):
    """Return grid indices for a point, with the origin at the grid centre.

    Coordinates are scaled, rounded to the nearest cell, and offset by
    half the grid so that (0, 0) maps to the middle of the grid.
    """
    # BUG FIX: the original used `grid_size / 2`, which under Python 3 is
    # float division and would make the returned indices floats (breaking
    # array indexing).  Floor division keeps them integers and behaves
    # identically on Python 2.
    jx = int(round(x * scale)) + grid_size // 2
    jy = int(round(y * scale)) + grid_size // 2
    return jx, jy
def get_trail_position(r):
    """Draw a uniform random trial point in the square [-r, r] x [-r, r]."""
    span = 2.0 * r
    x = span * rand() - r
    y = span * rand() - r
    return x, y
def norm_pdf(x, sigma):
    """Unnormalised Gaussian taper: exp(-x^2 / (2 sigma^2)).

    Despite the name this is not a true probability density -- the
    1/(sigma*sqrt(2*pi)) factor is omitted, so the value at x == 0 is 1.0.
    """
    denominator = 2.0 * sigma**2
    return exp(-(x**2) / denominator)
def gridgen8(edge_density, num_points, diameter, min_dist, n_miss_max=1000):
    """Generate random positions inside a circle with a radially tapered
    minimum separation.

    Points are drawn uniformly; a candidate is accepted only if its
    distance to every existing point exceeds a local minimum that grows
    towards the edge as min_dist / norm_pdf(r, sigma) (a Gaussian density
    taper).  The area is split into a sector grid so the separation test
    only has to inspect neighbouring grid cells.

    Args:
        edge_density: relative point density at the edge w.r.t. the
            centre; sets the taper width sigma.
        num_points: requested number of points.
            NOTE(review): immediately overridden with 50000 below (debug
            leftover), so this argument is effectively ignored -- confirm.
        diameter: layout diameter.
        min_dist: minimum separation at the centre.
        n_miss_max: consecutive rejected trials before giving up.

    Returns:
        (x, y, sigma): accepted coordinate arrays and the taper width used.
    """
    # Fix seed to study closest match fails (with fixed seed can
    # print problematic indices)
    # seed(2)
    num_points = 50000
    r = diameter / 2.0 # Radius
    p = 1.0 / edge_density
    max_dist = p * min_dist  # largest local separation (at the edge)
    sigma = r / log(p)**0.5  # taper width derived from the density ratio
    scale_max = 1.0 / norm_pdf(diameter / 2.0, sigma)  # computed but unused
    edge_dist = (1.0 / norm_pdf(20, sigma)) * min_dist
    print('- Edge dist:', edge_dist)
    print('- Area scaling: %f' % (edge_dist**2 / min_dist**2))
    # Grid size and scaling onto the grid
    grid_size = min(100, int(round(float(diameter) / max_dist)))
    grid_size += grid_size%2  # force an even number of cells
    grid_cell = float(diameter) / grid_size # Grid sector cell size
    scale = 1.0 / grid_cell # Scaling onto the sector grid.
    check_width = 1  # neighbouring cells inspected on each side
    print('- Station d: %f' % diameter)
    print('- Grid size: %i' % grid_size)
    print('- Min dist: %f' % min_dist)
    print('- Max dist: %f' % max_dist)
    print('- Sigma: %f' % sigma)
    print('- Grid cell: %f' % grid_cell)
    print('- check width: %i' % check_width)
    # Pre-allocate coordinate arrays
    x = numpy.zeros(num_points)
    y = numpy.zeros(num_points)
    # Grid meta-data: each cell stores a linked list of point indices.
    # First index in the grid
    grid_i_start = numpy.zeros((grid_size, grid_size), dtype='i8')
    # Last index in the grid
    grid_i_end = numpy.zeros((grid_size, grid_size), dtype='i8')
    # Points in grid cell.
    grid_count = numpy.zeros((grid_size, grid_size), dtype='i8')
    # Next coordinate index.
    grid_next = numpy.zeros(num_points, dtype='i8')
    n = num_points
    n_req = num_points
    num_miss = 0      # consecutive rejected trials
    max_num_miss = 0  # worst rejection streak seen
    j = 0             # number of accepted points so far
    space_remaining = True
    # NOTE(review): space_remaining is never set False; the outer loop
    # only exits through the `break` at the bottom.
    while space_remaining:
        done = False
        while not done:
            # Generate a trail position
            xt, yt = get_trail_position(r)
            rt = (xt**2 + yt**2)**0.5
            ant_r = min_dist / (2.0 * norm_pdf(rt, sigma))  # unused (see commented check)
            # Check if the point is inside the diameter.
            # if rt + ant_r > r:
            #     num_miss += 1
            if rt + min_dist / 2.0 > r:
                num_miss += 1
            # Check if min distance is met.
            else:
                jx, jy = grid_position(xt, yt, scale, r)
                y0 = max(0, jy - check_width)
                y1 = min(grid_size, jy + check_width + 1)
                x0 = max(0, jx - check_width)
                x1 = min(grid_size, jx + check_width + 1)
                dmin = diameter # Set initial min to diameter.
                # Scan the neighbourhood of cells, walking each cell's
                # linked list of accepted point indices.
                for ky in range(y0, y1):
                    for kx in range(x0, x1):
                        if grid_count[kx, ky] > 0:
                            i_other = grid_i_start[kx, ky]
                            for num_other in range(grid_count[kx, ky]):
                                dx = xt - x[i_other]
                                dy = yt - y[i_other]
                                dr = (dx**2 + dy**2)**0.5
                                r_other = (x[i_other]**2 + y[i_other]**2)**0.5
                                ant_r_other = min_dist / (2.0 * norm_pdf(r_other, sigma))
                                # Track the smallest margin after removing
                                # the neighbour's own (tapered) radius.
                                if dr - ant_r_other <= dmin:
                                    dmin = dr - ant_r_other
                                i_other = grid_next[i_other]
                scaled_min_dist_3 = (min_dist / 2.0) / norm_pdf(rt, sigma)
                if dmin >= scaled_min_dist_3:
                    # Accept the point and append it to its cell's list.
                    x[j] = xt
                    y[j] = yt
                    if grid_count[jx, jy] == 0:
                        grid_i_start[jx, jy] = j
                    else:
                        grid_next[grid_i_end[jx, jy]] = j
                    grid_i_end[jx, jy] = j
                    grid_count[jx, jy] += 1
                    print(j, num_miss)  # debug: accepted index + rejection streak
                    max_num_miss = max(max_num_miss, num_miss)
                    num_miss = 0
                    done = True
                    j += 1
                else:
                    num_miss += 1
                    if num_miss >= n_miss_max:
                        # Give up after too many consecutive rejections.
                        # NOTE(review): n = j - 1 also drops the last
                        # accepted point -- looks like an off-by-one;
                        # confirm intent.
                        n = j - 1
                        done = True
        if num_miss >= n_miss_max or j >= num_points:
            max_num_miss = max(max_num_miss, num_miss)
            break
    if n < n_req:
        x = x[0:n]
        y = y[0:n]
    print('- Found %i / %i points [max. misses: %i / %i]' %
          (n, n_req, max_num_miss, n_miss_max))
    return x, y, sigma
if __name__ == '__main__':
    # Diagnostic demo: generate a tapered random layout and draw every
    # point with its local minimum-separation circle, its antenna
    # footprint, and an arrow to its nearest neighbour (red = violation).
    # FIXME-BM think about relation of area and minimum spacing...
    # FIXME-BM based on amplitude taper (apodisation) work out how many antennas
    # FIXME-BM try to fit antenna in largest empty space each time?
    # FIXME-BM keep putting more antennas until fail ...
    # TODO-BM for a fixed seed see how number of tries effects numbers of
    # antennas placed.
    # are effectively lost. ie:
    # round(sum(ant) - sum(ant * weights)) = 256 - 200 == 56
    # n should equal round(sum(ant * apod. weights))
    n = 0  # requested point count (currently ignored inside gridgen8)
    d = 35  # station diameter
    d_min = 1.5  # minimum separation at the centre
    edge_density = 0.5 # w.r.t. centre.
    num_tries = 500000
    x, y, sigma = gridgen8(edge_density, n, d, d_min, n_miss_max=num_tries)
    num_points = len(x)
    print('Plotting...')
    # Sample the Gaussian taper profile across the station diameter.
    taper_x = numpy.linspace(-d / 2.0, d / 2.0, 20)
    taper_y = numpy.zeros_like(taper_x)
    for ix, x_ in enumerate(taper_x):
        # taper_y[ix] = d_min / (2.0 * norm_pdf(x_, sigma))
        taper_y[ix] = norm_pdf(x_, sigma)
    # fig = pyplot.figure(figsize=(10, 10))
    # fig.subplots_adjust(left=0.05, bottom=0.05, right=0.95, top=0.95,
    #                     wspace=0.0, hspace=0.0)
    # ax = fig.add_subplot(111)
    # ax.plot(taper_x, taper_y, '--')
    # pyplot.show()
    fig = pyplot.figure(figsize=(10, 10))
    fig.subplots_adjust(left=0.05, bottom=0.05, right=0.95, top=0.95,
                        wspace=0.0, hspace=0.0)
    ax = fig.add_subplot(111, aspect='equal')
    # Background cloud of uniform trial positions for visual reference.
    example_tries_x = -d/2.0 + d * rand(num_tries)
    example_tries_y = -d/2.0 + d * rand(num_tries)
    ax.plot(example_tries_x, example_tries_y, 'k+', alpha=0.1)
    ax.plot(x, y, '.', color='k', ms=3.0)
    circle = pyplot.Circle((0, 0), d / 2.0, color='k',
                           linestyle='--', fill=False)
    ax.add_artist(circle)
    for i in range(num_points):
        xp = x[i]
        yp = y[i]
        rp = (xp**2 + yp**2)**0.5 # Radius of this point
        # Distances from this point to every other point; pick the
        # nearest non-self neighbour.
        dx = x - x[i]
        dy = y - y[i]
        dist = (dx**2 + dy**2)**0.5
        i_min = numpy.where(dist == dist[dist != 0].min())[0][0]
        min_dist = dist[i_min]
        ro = (x[i_min]**2 + y[i_min]**2)**0.5 # Radius of closest point
        # Min dist radius for this point + that of the closest point
        # defines whether the two antennas overlap.
        r_ant_this = d_min / (2.0 * norm_pdf(rp, sigma))
        r_ant_closest = d_min / (2.0 * norm_pdf(ro, sigma))
        ox = x[i_min] - xp
        oy = y[i_min] - yp
        ax.arrow(xp, yp, ox, oy, head_width=0.1, head_length=0.05,
                 fc='g', ec='g')
        ax.text(xp, yp, '%i' % i, fontsize='x-small')
        if min_dist >= r_ant_this + r_ant_closest:
            color = 'b'  # separation satisfied
        else:
            # Report each violating pair and draw it in red.
            print(i, min_dist, r_ant_this, r_ant_closest,
                  r_ant_this + r_ant_closest)
            color = 'r'
        circle = pyplot.Circle((xp, yp), r_ant_this, color=color,
                               fill=False, alpha=0.1)
        ax.add_artist(circle)
        circle = pyplot.Circle((xp, yp), (d_min / 2.0), color=color,
                               fill=True, alpha=0.2)
        ax.add_artist(circle)
        # Antenna footprint drawn as a square rotated by 45 degrees; the
        # yp shift re-centres the rotated square on the point.
        extent = d_min / 2**0.5
        xp = xp  # no-op kept from the axis-aligned variant commented below
        yp -= extent / 2.0 * 2 ** 0.5
        angle = 45.0
        # xp = xp - extent / 2.0
        # yp = yp - extent / 2.0
        # angle = 0.0
        rect = pyplot.Rectangle((xp, yp),
                                width=extent, height=extent,
                                angle=angle, color=color, linestyle='-',
                                fill=True, alpha=0.4)
        ax.add_artist(rect)
    ax.set_title('%i' % (len(x)))
    ax.set_xlim(-(d / 2.0 + d_min / 2.0), d / 2.0 + d_min / 2.0)
    ax.set_ylim(-(d / 2.0 + d_min / 2.0), d / 2.0 + d_min / 2.0)
    pyplot.show()
| [
"benjamin.mort@oerc.ox.ac.uk"
] | benjamin.mort@oerc.ox.ac.uk |
2aee15990e576ff4d1f8ba52fe34188c61efc469 | 056adbbdfb968486ecc330f913f0de6f51deee33 | /277-find-the-celebrity/find-the-celebrity.py | 5d5b0edbef91abbf8d99870dd9924f1dea1c0472 | [] | no_license | privateHmmmm/leetcode | b84453a1a951cdece2dd629c127da59a4715e078 | cb303e610949e953b689fbed499f5bb0b79c4aea | refs/heads/master | 2021-05-12T06:21:07.727332 | 2018-01-12T08:54:52 | 2018-01-12T08:54:52 | 117,215,642 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,646 | py | # -*- coding:utf-8 -*-
#
# Suppose you are at a party with n people (labeled from 0 to n - 1) and among them, there may exist one celebrity. The definition of a celebrity is that all the other n - 1 people know him/her but he/she does not know any of them.
#
#
#
# Now you want to find out who the celebrity is or verify that there is not one. The only thing you are allowed to do is to ask questions like: "Hi, A. Do you know B?" to get information of whether A knows B. You need to find out the celebrity (or verify there is not one) by asking as few questions as possible (in the asymptotic sense).
#
#
#
# You are given a helper function bool knows(a, b) which tells you whether A knows B. Implement a function int findCelebrity(n), your function should minimize the number of calls to knows.
#
#
#
# Note: There will be exactly one celebrity if he/she is in the party. Return the celebrity's label if there is a celebrity in the party. If there is no celebrity, return -1.
#
# The knows API is already defined for you.
# @param a, person a
# @param b, person b
# @return a boolean, whether a knows b
# def knows(a, b):
class Solution(object):
    def findCelebrity(self, n):
        """Return the label of the celebrity among n people, or -1.

        A celebrity is known by everyone else but knows no one.  Pass 1
        narrows the field to one candidate: if the current candidate knows
        i, the candidate cannot be a celebrity, but i still might be.
        Pass 2 verifies the candidate against every other person.  Makes
        O(n) calls to the external knows() API.

        :type n: int
        :rtype: int
        """
        # Pass 1: eliminate everyone except a single candidate.
        possible = 0
        for i in range(1, n):
            if knows(possible, i):
                possible = i
        # Pass 2: a real celebrity is known by every i and knows nobody.
        # (Removed a stale commented-out variant of this check that only
        # tested one direction.)
        for i in range(0, n):
            if possible != i and (not knows(i, possible) or knows(possible, i)):
                return -1
        return possible
| [
"hyan90@ucsc.edu"
] | hyan90@ucsc.edu |
3190629752516f8a00989529c3e5b4122ecccdc3 | c2c813717d1ab5df2e912d510595e71eb26d505a | /mineralization/clean code/test_M2-M2_convert.py | c9e69fda0456cdcb52c1b39fb8a98d84aba9b61d | [] | no_license | danielrgreen/toothmin | 7dfaa17dea34c6b42b8196652fb0d1ebcaf4b798 | 307d675225c69340745454ba220df1a5c4089d7a | refs/heads/master | 2020-12-24T16:34:07.595761 | 2017-11-08T19:07:15 | 2017-11-08T19:07:15 | 13,531,025 | 0 | 0 | null | 2014-07-25T19:47:58 | 2013-10-12T23:44:17 | Python | UTF-8 | Python | false | false | 5,230 | py | # Daniel Green, Gregory Green, 2014
# drgreen@fas.harvard.edu
# Human Evolutionary Biology
# Center for Astrophysics
# Harvard University
#
# Mineralization Model Re-Size:
# this code takes a larger mineralization model
# and produces images demonstrating mineral density
# increase over time, total density over time, or
# calculates final isotope distributions at full
# or partial resolution.
#
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import InterpolatedUnivariateSpline
import scipy.special as spec
from time import time
def tooth_timing_convert_curv2lin(conversion_times, a1, s1, o1, max1, s2, o2, max2):
    """Map event days from a tooth with erf-shaped extension onto a tooth
    with linear extension.

    The source tooth extends as a1*erf(s1*(t - o1)) + (max1 - a1); the
    target extends linearly as s2*t + o2.  Events are matched by their
    fractional crown height (extension / max height).
    """
    # Extension (mm) reached in the source tooth at each event day.
    source_extension = a1*spec.erf(s1*(conversion_times-o1))+(max1-a1)
    # Same fractional height expressed in the target tooth, then inverted
    # through the linear model to recover days.
    fraction = source_extension / max1
    target_extension = fraction * max2
    return (target_extension-o2)/s2
def tooth_timing_convert_lin2curv(conversion_times, s1, o1, max1, a2, s2, o2, max2):
    """Inverse companion of tooth_timing_convert_curv2lin: map event days
    from a linearly extending tooth onto one with erf-shaped extension."""
    # Extension reached in the (linear) source tooth, then its fraction of
    # full crown height, re-expressed in the target tooth.
    source_extension = (s1*conversion_times)+o1
    fraction = source_extension / max1
    target_extension = fraction * max2
    # Invert a2*erf(s2*(t - o2)) + (max2 - a2) = target_extension for t.
    return (spec.erfinv((a2+target_extension-max2)/a2) + (o2*s2)) / s2
def tooth_timing_convert(conversion_times, a1, s1, o1, max1, a2, s2, o2, max2):
    """Map event days between two teeth whose extensions both follow an
    error-function growth curve.

    Events occurring at ``conversion_times`` (days) in the first tooth are
    located by their fractional crown height, then that height is mapped
    onto the second tooth and inverted back to days.

    Args:
        conversion_times: 1-D numpy array of days to convert.
        a1, s1, o1, max1: amplitude, slope, offset and max height (mm) of
            the first tooth's extension-vs-day error function.
        a2, s2, o2, max2: the same parameters for the second tooth.

    Returns:
        1-D numpy array of converted days.
    """
    # Extension reached in tooth 1, as a fraction of its full height.
    height_1 = a1*spec.erf(s1*(conversion_times-o1))+(max1-a1)
    fraction = height_1 / max1
    # Equivalent extension in tooth 2, inverted through its growth curve.
    height_2 = fraction * max2
    return (spec.erfinv((a2+height_2-max2)/a2) + (o2*s2)) / s2
def spline_input_signal(iso_values, value_days, smoothness):
    """Interpolate a blocky isotope history into a daily signal.

    Each entry of ``iso_values`` is treated as a sample taken every
    ``value_days`` days; a spline of degree ``smoothness`` is fitted
    through those samples and evaluated on every day of the record.
    Only the first 584 days are returned (the modelled window).
    """
    n_samples = np.size(iso_values)
    sample_days = value_days * np.arange(n_samples)
    spline = InterpolatedUnivariateSpline(sample_days, iso_values, k=smoothness)
    daily_grid = np.arange(n_samples * value_days)
    return spline(daily_grid)[:584]
def main():
    """Convert a synthetic M2 step signal onto a second tooth's timeline
    and plot both versions for comparison.

    NOTE: this file is Python 2 (print statements below).
    """
    # Extension-curve parameter sets (amplitude, slope, offset, max height
    # per tooth); *_curv2lin holds the 7 arguments of
    # tooth_timing_convert_curv2lin (erf-shaped M2 -> linear model).
    m1_m2_params = np.array([21.820, .007889, 29.118, 35., 67.974, 0.003352, -25.414, 41.]) # 'synch86', outlier, 100k
    m2_m1_params = np.array([67.974, 0.003352, -25.414, 41., 21.820, .007889, 29.118, 35.]) # 'synch86', outlier, 100k
    m2_m2_params_curv2lin = np.array([67.974, 0.003352, -25.414, 41., (41./416.), -8.3, 41.]) # 'synch86', outlier, 100k
    # Synthetic seasonal d18O signals (not used in this test; kept for
    # reference).
    daily_d18O_360 = 10.*np.sin((2*np.pi/360.)*(np.arange(600.)))-11.
    daily_d18O_180 = 10.*np.sin((2*np.pi/180.)*(np.arange(600.)))-11.
    daily_d18O_090 = 10.*np.sin((2*np.pi/90.)*(np.arange(600.)))-11.
    daily_d18O_045 = 10.*np.sin((2*np.pi/45.)*(np.arange(600.)))-11.
    days = np.arange(84., 684.)
    converted_days = tooth_timing_convert_curv2lin(days, *m2_m2_params_curv2lin)
    # Step-wise test signal: 50-day blocks of elevated values.
    M2_test1 = np.ones(days.size)
    M2_test1[:] = 5.
    M2_test1[50:100] = 15.
    M2_test1[150:200] = 25.
    M2_test1[250:300] = 35.
    M2_test1[350:400] = 45.
    M2_test1[450:500] = 55.
    # Resample the M2 signal onto the converted day axis: each converted
    # day overwrites the tail of the target signal from that day on.
    M1_test1_tmp = np.ones(converted_days.size)
    for k,d in enumerate(converted_days):
        print k,d
        d = int(d)
        M1_test1_tmp[d:] = M2_test1[k]
    M1_test1 = M1_test1_tmp
    M1_test1 = M1_test1[84:]
    print 'days =', days
    print 'converted days =', converted_days
    print 'm2 = ', M2_test1
    print 'm1 = ', M1_test1
    t_save = time()  # timestamp used to uniquify the output file name
    print days.size, M1_test1.size, M2_test1.size, days[:-84].size
    # Two stacked panels: original vs converted signal.
    fig = plt.figure()
    ax1 = fig.add_subplot(2,1,1)
    ax1text = 'M2->M2, M2_days start@84, M2/M2 plotted w/diff day_arrays'
    ax1.text(0, 50, ax1text, fontsize=8)
    ax1.plot(days, M2_test1, 'k--', linewidth=1.0)
    ax1.plot(converted_days[:-84], M1_test1, 'b-', linewidth=1.0)
    ax1.set_ylim(-5, 65)
    ax1.set_xlim(-50, 600)
    ax1 = fig.add_subplot(2,1,2)
    ax1text = 'M2->M2, M2_days start@84, M2/M2 plotted on same'
    ax1.text(0, 50, ax1text, fontsize=8)
    ax1.plot(np.arange(np.size(M2_test1)), M2_test1, 'k--', linewidth=1.0)
    ax1.plot(np.arange(np.size(M1_test1)), M1_test1, 'b-', linewidth=1.0)
    ax1.set_ylim(-5, 65)
    ax1.set_xlim(-50, 600)
    fig.savefig('M2-M2_convert_testing_{0}.svg'.format(t_save), dpi=300, bbox_inches='tight')
    plt.show()
    return 0
if __name__ == '__main__':
    main()
| [
"drgreen@fas.harvard.edu"
] | drgreen@fas.harvard.edu |
e85108788bcb1f5b261f9714053c33ce7a2fa359 | 3f9cfe467c5e5328704188067b15332cc3021f02 | /servic/admin.py | 3795081c8e29739b6ab59c1c4aabff3b616cbdf4 | [] | no_license | mamee93/website-text | c7423cfae9f35c52e2fdc19220912050a76ef013 | e9decd32319b4d1abf39d64dc833d9262b85703a | refs/heads/master | 2022-12-05T00:45:26.004178 | 2020-08-27T09:39:13 | 2020-08-27T09:39:13 | 290,735,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | from django.contrib import admin
from .models import Services
# Register your models here: expose the Services model in the Django admin.
admin.site.register(Services) | [
"mameeal@gmail.com"
] | mameeal@gmail.com |
6eae2990fa0e566303822d2384af4be325e85c9e | 8d49df8fd04ef5cc5123b956470ab70344d39cc7 | /crash_course/ch17/python_repos.py | cb63fa657aecd488a86f9c05b5e7f4f98cea46e5 | [
"BSD-3-Clause"
] | permissive | dantin/python-by-example | 314c1d97bb527f65e5ada59ee1a72d6df4d881b3 | 5769c7a332ebd60fd54e477b6813f2f2a0f3f37f | refs/heads/master | 2022-12-10T04:49:52.771288 | 2019-03-05T03:41:02 | 2019-03-05T03:41:02 | 143,107,516 | 0 | 0 | BSD-3-Clause | 2022-12-08T02:47:35 | 2018-08-01T05:21:53 | Python | UTF-8 | Python | false | false | 1,487 | py | # -*- coding: utf-8 -*-
import pygal
import requests
from pygal.style import LightColorizedStyle as LCS, LightenStyle as LS
# Make an API call and store the response.
url = 'https://api.github.com/search/repositories?q=language:python&sort=stars'
# A timeout makes the script fail fast instead of hanging on a stalled
# connection.
r = requests.get(url, timeout=30)
print('Status code:', r.status_code)

# Store API response in a variable.
response_dict = r.json()
print('Total repositories:', response_dict['total_count'])

# Explore information about the repositories.
repo_dicts = response_dict['items']

names, plot_dicts = [], []
for repo_dict in repo_dicts:
    names.append(repo_dict['name'])
    # Get the project description, if one is available.
    description = repo_dict['description']
    if not description:
        description = 'No description provided.'
    # One bar per repo: height = star count, tooltip = description,
    # click-through link = repository URL.
    plot_dict = {
        'value': repo_dict['stargazers_count'],
        'label': str(description),
        'xlink': repo_dict['html_url'],
    }
    plot_dicts.append(plot_dict)

# Make visualization.
my_style = LS('#333366', base_style=LCS)
my_style.title_font_size = 24
my_style.label_font_size = 14
my_style.major_label_font_size = 18

my_config = pygal.Config()
my_config.x_label_rotation = 45
my_config.show_legend = False
# BUG FIX: this was misspelled "tuncate_label", which pygal silently
# ignored, so long project names were never truncated.
my_config.truncate_label = 15
my_config.show_y_guides = False
my_config.width = 1000

chart = pygal.Bar(my_config, style=my_style)
chart.title = 'Most-Starred Python Projects on GitHub'
chart.x_labels = names
chart.add('', plot_dicts)
chart.render_to_file('python_repos.svg')
| [
"noreply@github.com"
] | dantin.noreply@github.com |
fa07e2b4fabebab106934800e1c21dbd1ebddb1a | abfa0fcab2bc9a9c3cccbc3a8142cdd4b2a66ee9 | /251-Flatten 2D Vector.py | f488e83697174a77d26799a5b6cad491014a04f2 | [] | no_license | JinnieJJ/leetcode | 20e8ccf3f8919028c53e0f0db86bcc2fbc7b6272 | 26c6ee936cdc1914dc3598c5dc74df64fa7960a1 | refs/heads/master | 2021-04-15T09:18:08.450426 | 2021-03-06T01:53:27 | 2021-03-06T01:53:27 | 126,275,814 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 825 | py | class Vector2D(object):
    def __init__(self, vec2d):
        """
        Initialize your data structure here.
        :type vec2d: List[List[int]]

        Stores the nested list unchanged; (i, j) is a cursor into it.
        j starts at -1 so the first hasNext() call advances it to index 0
        (hasNext() must be called before each next()).
        """
        self.stack = vec2d
        self.i = 0
        self.j = -1
    def next(self):
        """
        :rtype: int

        Return the element the cursor currently rests on.  hasNext() must
        have been called (and returned True) immediately before, since it
        is hasNext() that advances the cursor.
        """
        return self.stack[self.i][self.j]
def hasNext(self):
"""
:rtype: bool
"""
if not self.stack:
return False
self.j += 1
while True:
if self.j < len(self.stack[self.i]):
return True
self.i += 1
if self.i >= len(self.stack):
return False
self.j = 0
# Your Vector2D object will be instantiated and called as such:
# i, v = Vector2D(vec2d), []
# while i.hasNext(): v.append(i.next())
| [
"noreply@github.com"
] | JinnieJJ.noreply@github.com |
b9462913cf8d146f69d2293389e30e4c52a4d3dc | 8e0f89612fb278c07cd871ea29685917caec4e0d | /second_trik_bubble_short.py | a3f219a8bac5489cd00249ab9006136a0bb42ad7 | [] | no_license | amritat123/list_Questions | 169836e87863f1d97776a9f9da0dd14e0512ac8a | d98982fd16a2165d7fd4bea8c89014897a1a0f72 | refs/heads/main | 2023-06-20T06:35:41.533536 | 2021-07-10T07:52:04 | 2021-07-10T07:52:04 | 369,066,763 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 189 | py | num=[10,80,50,40,30,20]
# Bubble sort demonstration.
# BUG FIX: the original snippet compared elements but never swapped them
# (the inner `if` body was `pass`) and ended in a syntactically invalid
# print line; this version performs a real in-place ascending bubble sort.
num = [10, 80, 50, 40, 30, 20]
n = len(num)
for i in range(n - 1):
    # After pass i the largest element of the unsorted prefix has bubbled
    # to position n - 1 - i, so the inner scan can stop there.
    for j in range(n - 1 - i):
        if num[j] > num[j + 1]:
            num[j], num[j + 1] = num[j + 1], num[j]
print(num)  # this is bubble sort
"you@example.com"
] | you@example.com |
3671cea80cef5e56d1c8b1e9baab67b8c3992441 | a6803cfd1fad3e8ae7c5f6ee8a34302516f4f3ec | /640_extent/resnet_extent640_4layer_dropout25.py | 611f2d6ffd305f1d41ea5b6d72839634ffaf81d1 | [] | no_license | LasseGoransson/bachelor-code | 906ae4bbd22f6bb73b4727b62268115f186d922a | 016af73719792252bae58b11a2a43a674109abd7 | refs/heads/master | 2022-08-24T11:59:56.017903 | 2020-05-26T08:22:28 | 2020-05-26T08:22:28 | 238,145,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,002 | py | from __future__ import absolute_import, division, print_function, unicode_literals
import neptune
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras.callbacks import Callback
from tensorflow.keras.models import Sequential
from tensorflow.keras import regularizers
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D,GlobalAveragePooling2D, Concatenate, Reshape,GlobalMaxPooling2D, Activation, Input
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from PIL import Image
import numpy as np
import pandas
import os
import pathlib
import datetime
import math
import sys
# GPU setup: enable memory growth so TensorFlow does not grab all GPU RAM
# up front.  NOTE(review): gpus[0] raises IndexError on a machine with no
# visible GPU -- confirm this script only runs on GPU hosts.
gpus = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(gpus[0], True)
# Config loading: data locations and training hyper-parameters.
train_path = "../../bachelor-data/allTrain.csv"
validate_path ="../../bachelor-data/allTest.csv"
image_dir = "../../bachelor-data/data_640x1030_extentW/"
checkpointpath = "../../bachelor-data/checkpoints/"
# The script file name doubles as the experiment/model name.
modelName = sys.argv[0]
learning_rate = 0.001
image_height =1030
image_width = 640
batch_size = 4
numEpochs = 200
# Experiment parameters logged to Neptune alongside the run.
conf= {
    "train_path": train_path,
    "validate_path": validate_path,
    "image_dir": image_dir,
    "modelName": modelName,
    "learning_rate": learning_rate,
    "image_height": image_height,
    "image_width": image_width,
    "batch_size": batch_size,
    "numEpochs": numEpochs,
    "aspectImages": "true"
}
# select project
neptune.init('lassegoransson/xrayPredictor')
# Data generators: stream (image, label) batches from the CSV manifests;
# training data gets horizontal/vertical flip augmentation, validation
# only the 1/255 rescaling.
train_df = pandas.read_csv(train_path)
validate_df = pandas.read_csv(validate_path)
train_datagen = ImageDataGenerator(
    rescale=1./255,
    horizontal_flip=True,
    vertical_flip=True
)
val_datagen = ImageDataGenerator(
    rescale=1./255,
)
# class_mode="raw" passes the numeric 'label' column through unchanged
# (regression targets rather than class indices).
train_generator = train_datagen.flow_from_dataframe(
    dataframe=train_df,
    directory=image_dir,
    x_col="filename",
    y_col='label',
    target_size=(image_height, image_width),
    batch_size=batch_size,
    shuffle=True,
    class_mode="raw",
    color_mode="rgb"
)
val_generator = val_datagen.flow_from_dataframe(
    dataframe=validate_df,
    directory=image_dir,
    x_col="filename",
    y_col='label',
    target_size=(image_height, image_width),
    batch_size=batch_size,
    shuffle=True,
    class_mode="raw",
    color_mode="rgb"
)
# Model: ImageNet-pretrained ResNet50 backbone (no classification head,
# global average pooling) followed by a 4-layer dense head with dropout
# 0.25 between layers (matches the script name), ending in a single
# regression output trained with MSE.
RESNET = keras.applications.resnet.ResNet50(include_top=False, weights='imagenet', input_shape=(image_height,image_width,3), pooling="avg")
model = tf.keras.Sequential()
# Leftover experiments with freezing the backbone / grayscale projection:
#for layer in RESNET.layers:
#    model.add(layer)
#for l in model.layers:
#    l.trainable=False
# Projection
#model.add(Conv2D(3,(1,1),input_shape=(image_height,image_width,1),padding="same"))
model.add(RESNET)
#model.layers[1].trainable=True
model.add(Dropout(0.25))
# NOTE(review): Dense(n, Activation("relu")) passes an Activation layer
# instance as the `activation` argument; Keras accepts any callable there,
# but the conventional spelling is Dense(n, activation="relu").
model.add(Dense(512,Activation("relu")))
model.add(Dropout(0.25))
model.add(Dense(256,Activation("relu")))
model.add(Dropout(0.25))
model.add(Dense(124,Activation("relu")))  # 124 units -- possibly meant 128? confirm
model.add(Dropout(0.25))
model.add(Dense(64,Activation("relu")))
model.add(Dense(1))  # scalar regression output
optimize = keras.optimizers.Adam(learning_rate=learning_rate)
model.compile(optimizer=optimize,
              loss='MSE',
              metrics=['mse']
)
class NeptuneMonitor(Callback):
    """Keras callback that forwards per-epoch metrics to Neptune."""

    def on_epoch_end(self, epoch, logs=None):
        """Send loss, val_loss and the current learning rate after each epoch.

        ``logs=None`` replaces the original mutable default ``logs={}``
        (the shared-dict pitfall); Keras always supplies ``logs``, but we
        guard anyway.
        """
        logs = logs or {}
        neptune.send_metric('val_loss', epoch, logs['val_loss'])
        neptune.send_metric('loss', epoch, logs['loss'])
        # Read the optimizer's learning-rate tensor back as a plain float.
        neptune.send_metric('learning_rate', epoch, float(tf.keras.backend.get_value(self.model.optimizer.lr)))
# Checkpoint file names encode the epoch and val_loss; the val_loss is
# parsed back out of the name further below.
filepath=str(checkpointpath)+"model_"+str(modelName)+"_checkpoint-"+str(image_height)+"x"+str(image_width)+"-{epoch:03d}-{val_loss:.16f}.hdf5"
# Halve the learning rate after 2 stagnant epochs; stop after 10 stagnant
# epochs and restore the best weights; keep only improving checkpoints.
RLR = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=2, verbose=1, mode='min', min_delta=0.0001, cooldown=0)
checkpoint = keras.callbacks.ModelCheckpoint(filepath, monitor='val_loss', verbose=0, save_best_only=True, save_weights_only=False, mode='min')
earlyStop = keras.callbacks.EarlyStopping(monitor='val_loss', mode='min', patience=10, restore_best_weights=True,verbose=1)
with neptune.create_experiment(name=modelName, params=conf) as npexp:
    neptune_monitor = NeptuneMonitor()
    callbacks_list = [checkpoint, neptune_monitor, RLR, earlyStop]
    model.summary()
    history = model.fit(train_generator,validation_data=val_generator,verbose=1 , epochs=numEpochs, steps_per_epoch=train_generator.n/train_generator.batch_size , callbacks=callbacks_list)
    # Upload the newest checkpoint file as the run artifact.
    import glob
    list_of_files = glob.glob(checkpointpath+"*") # * means all if need specific format then *.csv
    latest_file = max(list_of_files, key=os.path.getctime)
    modelfileName = latest_file
    npexp.send_artifact(modelfileName)
    # Recover the best val_loss from the checkpoint file name.
    # NOTE(review): split('-')[4] assumes no extra '-' characters in the
    # path or model name -- fragile; confirm.
    tmp = modelfileName.split('-')[4].split('.')
    val = float(tmp[0]+"."+tmp[1])
    neptune.send_metric('val_loss', val)
| [
"you@example.com"
] | you@example.com |
fd161207e0b369b7d7a6b1809a4cab6f575b57a7 | 35dbd536a17d7127a1dd1c70a2903ea0a94a84c2 | /src/sudo/settings.py | 82c6025fc5de974d3bcce10c3ee45c17c23f942a | [
"Apache-2.0",
"BUSL-1.1",
"BSD-3-Clause"
] | permissive | nagyist/sentry | efb3ef642bd0431990ca08c8296217dabf86a3bf | d9dd4f382f96b5c4576b64cbf015db651556c18b | refs/heads/master | 2023-09-04T02:55:37.223029 | 2023-01-09T15:09:44 | 2023-01-09T15:09:44 | 48,165,782 | 0 | 0 | BSD-3-Clause | 2022-12-16T19:13:54 | 2015-12-17T09:42:42 | Python | UTF-8 | Python | false | false | 1,881 | py | """
sudo.settings
~~~~~~~~~~~~~
:copyright: (c) 2020 by Matt Robenolt.
:license: BSD, see LICENSE for more details.
"""
from django.conf import settings
# Default URL to redirect to after successfully elevating permissions.
REDIRECT_URL = getattr(settings, "SUDO_REDIRECT_URL", "/")
# The querystring argument used to carry the redirect destination.
REDIRECT_FIELD_NAME = getattr(settings, "SUDO_REDIRECT_FIELD_NAME", "next")
# How long should sudo mode be active for? Duration in seconds
# (default 10800 s == 3 hours).
COOKIE_AGE = getattr(settings, "SUDO_COOKIE_AGE", 10800)
# The domain to bind the sudo cookie to. Defaults to Django's session
# cookie domain (usually the current domain).
COOKIE_DOMAIN = getattr(settings, "SUDO_COOKIE_DOMAIN", settings.SESSION_COOKIE_DOMAIN)
# Should the cookie be flagged HttpOnly (inaccessible to JavaScript)?
# Note: If this is set to False, any JavaScript on the page can read this
# cookie, so it should only be changed if you have a good reason to do so.
COOKIE_HTTPONLY = getattr(settings, "SUDO_COOKIE_HTTPONLY", True)
# The name of the cookie to be used for sudo mode.
COOKIE_NAME = getattr(settings, "SUDO_COOKIE_NAME", "sudo")
# Restrict the sudo cookie to a specific path.
COOKIE_PATH = getattr(settings, "SUDO_COOKIE_PATH", "/")
# Only transmit the sudo cookie over https if True.
# The default (None) matches the current protocol, so if your site is
# https already this behaves as True.
COOKIE_SECURE = getattr(settings, "SUDO_COOKIE_SECURE", None)
# An extra salt mixed into the cookie signature.
COOKIE_SALT = getattr(settings, "SUDO_COOKIE_SALT", "")
# The name of the session attribute used to preserve the redirect destination
# between the original page request and the successful sudo login.
REDIRECT_TO_FIELD_NAME = getattr(settings, "SUDO_REDIRECT_TO_FIELD_NAME", "sudo_redirect_to")
# The URL for the sudo page itself. May be a URL or a view name.
URL = getattr(settings, "SUDO_URL", "sudo.views.sudo")
| [
"noreply@github.com"
] | nagyist.noreply@github.com |
65b1050ec71da8aaaec7459c66e049e59bb12264 | b007d88e6726452ffa8fe80300614f311ae5b318 | /LeetCode/monthly_challenges/2021/march/27_palindrome_substring_count.py | 8d68e0b1d81402b18b72f054addcaa1466345fa8 | [] | no_license | jinurajan/Datastructures | ec332b12b8395f42cb769e771da3642f25ba7e7f | 647fea5d2c8122468a1c018c6829b1c08717d86a | refs/heads/master | 2023-07-06T14:42:55.168795 | 2023-07-04T13:23:22 | 2023-07-04T13:23:22 | 76,943,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 793 | py | """
Palindromic Substrings
Given a string, your task is to count how many palindromic substrings in this string.
The substrings with different start indexes or end indexes are counted as different substrings even they consist of same characters.
Example 1:
Input: "abc"
Output: 3
Explanation: Three palindromic strings: "a", "b", "c".
Example 2:
Input: "aaa"
Output: 6
Explanation: Six palindromic strings: "a", "a", "a", "aa", "aa", "aaa".
"""
class Solution:
    def countSubstrings(self, s: str) -> int:
        """Count the palindromic substrings of ``s``.

        Substrings at different positions count separately even when their
        text is identical.  Expands around each of the 2n-1 palindrome
        centres (every character and every gap between two characters), so
        the work is O(n^2) instead of the original O(n^3)
        check-every-substring brute force.
        """
        n = len(s)
        count = 0
        for center in range(2 * n - 1):
            left = center // 2            # centre character (or left of the gap)
            right = left + center % 2     # == left for odd, left + 1 for even length
            while left >= 0 and right < n and s[left] == s[right]:
                count += 1
                left -= 1
                right += 1
        return count
print(Solution().countSubstrings("abcd"))
| [
"jinu.p.r@gmail.com"
] | jinu.p.r@gmail.com |
ac8191849d0491fd40e042225c0b9272cab1af55 | 1af5bbc95a39c505897be519841e02f4ebb0e4f9 | /jtyoui/neuralNetwork/paddle/ernie/run_msra.py | f0b5e8399e4e33de8a7b94c7348cda4d62c0c986 | [
"MIT"
] | permissive | BarryZM/Jtyoui | 037868f7211ee07ddbd4c9c5c3382e290c67fd25 | 08609671237bd5d83d98e1fa796d32ddfc92c274 | refs/heads/master | 2020-09-12T12:10:24.577574 | 2019-11-16T13:52:18 | 2019-11-16T13:52:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,661 | py | #!/usr/bin/python3.7
# -*- coding: utf-8 -*-
# @Time : 2019/4/11 11:44
# @Author: Jtyoui@qq.com
from jtyoui.neuralNetwork.paddle.ernie.transformer_encoder import encoder, pre_process_layer
from jtyoui.neuralNetwork.paddle.ernie.vocab import vocal
import os
import numpy as np
from paddle import fluid
# Hyper-parameters of the pretrained ERNIE transformer (BERT-base sized).
ERNIE_MODEL_PARAMETER = {
    "attention_probs_dropout_prob": 0.1,
    "hidden_act": "relu",
    "hidden_dropout_prob": 0.1,
    "hidden_size": 768,
    "initializer_range": 0.02,
    "max_position_embeddings": 513,
    "num_attention_heads": 12,
    "num_hidden_layers": 12,
    "type_vocab_size": 2,
    "vocab_size": 18000
}
# BIO tag ids for the MSRA named-entity-recognition task.
ERNIE_LABEL_MAP = {
    "B-PER": 0,  # person name
    "I-PER": 1,
    "B-ORG": 2,  # organization name
    "I-ORG": 3,
    "B-LOC": 4,  # place name
    "I-LOC": 5,
    "O": 6
}
# Change these to your own checkpoint path / configuration as needed.
model_path, config, label_map_config = None, ERNIE_MODEL_PARAMETER, ERNIE_LABEL_MAP
def pad_batch_data(inst, pad_idx=0, input_mask=False):
    """Right-pad a batch of variable-length id sequences to a common length.

    Args:
        inst: non-empty list of sequences (lists of int ids).
        pad_idx: id used to fill the tail of shorter sequences.
        input_mask: when True, also return a float32 mask that is 1.0 over
            real tokens and 0.0 over padding.

    Returns:
        An int64 array of shape (batch, max_len, 1), or the list
        [ids, mask] when ``input_mask`` is True (mask has the same shape,
        dtype float32).
    """
    # BUG-PRONE SHADOWING FIX: the original reused the parameter name
    # `inst` as its own loop variable; `seq` is used here for clarity.
    # Behavior is unchanged.
    max_len = max(len(seq) for seq in inst)
    padded = np.array([seq + [pad_idx] * (max_len - len(seq)) for seq in inst])
    return_list = [padded.astype("int64").reshape([-1, max_len, 1])]
    if input_mask:
        mask = np.array([[1] * len(seq) + [0] * (max_len - len(seq)) for seq in inst])
        return_list.append(np.expand_dims(mask, axis=-1).astype("float32"))
    return return_list if len(return_list) > 1 else return_list[0]
def prepare_batch_data(example):
    # Single-example "batch": map each character of `example` to its vocab
    # id, silently dropping characters missing from the vocabulary.
    # Ids 1 and 2 are presumably the [CLS]/[SEP] special tokens -- TODO
    # confirm against the ERNIE vocab.
    words = [1] + [vocal[word] for word in example if word in vocal] + [2]
    # Token ids plus a 1/0 mask over real tokens vs padding.
    padded_token_ids, input_mask = pad_batch_data([words], 0, True)
    # Sentence-type ids: everything belongs to segment 0.
    padded_text_type_ids = pad_batch_data([[0] * len(words)])
    padded_position_ids = pad_batch_data([list(range(len(words)))])
    # Placeholder labels for inference.  NOTE(review): the fill value 8 is
    # outside ERNIE_LABEL_MAP (ids 0-6) and the pad value is
    # len(label_map_config) - 1 == 6 -- looks inconsistent, confirm.
    padded_label_ids = pad_batch_data([[8] * len(words)], len(label_map_config) - 1)
    return_list = [padded_token_ids, padded_text_type_ids, padded_position_ids, input_mask, padded_label_ids]
    yield return_list
def data_generator(input_str):
    """Wrap a single input string as a zero-argument batch reader.

    Nothing executes until the returned callable is invoked and the
    resulting generator is iterated.
    """
    def wrapper():
        yield from prepare_batch_data(input_str)
    return wrapper
def init_checkpoint(exe, init_checkpoint_path, main_program):
    """Load the persistable variables of ``main_program`` from a checkpoint
    directory, skipping any variable without a saved file."""
    def existed(var):
        # Load only persistable variables whose serialized file is present.
        return (fluid.io.is_persistable(var)
                and os.path.exists(os.path.join(init_checkpoint_path, var.name)))
    fluid.io.load_vars(exe, init_checkpoint_path, main_program=main_program,
                       predicate=existed)
def evaluate(exe, program, reader, graph_vars):
    # Run the evaluation program against the py_reader feed.
    # NOTE(review): the loop has no success-path exit -- exe.run is
    # repeated and total_number overwritten each pass until exe.run raises
    # (paddle signals end-of-data with an exception); confirm intent.
    fetch_list = [graph_vars["labels"].name, graph_vars["infers"].name]
    total_number = None
    while True:
        reader.start()
        try:
            _, np_infers = exe.run(program=program, fetch_list=fetch_list)
            # Keep the first column of each inference row, dropping the
            # first and last rows (presumably the [CLS]/[SEP] positions --
            # TODO confirm).
            total_number = [ls[0] for ls in np_infers[1:-1]]
        except Exception as e:
            # Any exception (including normal end-of-data) ends evaluation;
            # the error is printed rather than re-raised.
            print(e)
            reader.reset()
            break
    return total_number
def create_model():
    """Build the ERNIE sequence-labelling inference graph.

    :return: ``(reader, graph_vars)`` where ``reader`` is the py_reader that
        feeds the five input slots and ``graph_vars`` maps ``"labels"`` /
        ``"infers"`` to the flattened gold / predicted label tensors.
    """
    # Five input slots in order: token ids, sentence-type ids, position ids,
    # input mask (float), label ids.
    reader = fluid.layers.py_reader(capacity=50, shapes=[[-1, 256, 1]] * 5, lod_levels=[0] * 5, use_double_buffer=True,
                                    dtypes=['int64'] * 3 + ['float32', 'int64'])
    src_ids, sent_ids, pos_ids, input_mask, labels = fluid.layers.read_file(reader)
    # Self-attention mask: outer product of the padding mask, shifted/scaled
    # so positions involving padding become a large negative bias (-10000).
    self_attn_mask = fluid.layers.matmul(x=input_mask, y=input_mask, transpose_y=True)
    self_attn_mask = fluid.layers.scale(x=self_attn_mask, scale=10000.0, bias=-1.0, bias_after_scale=False)
    # Replicate the mask once per attention head.
    n_head_self_attn_mask = fluid.layers.stack(x=[self_attn_mask] * config['num_attention_heads'], axis=1)
    n_head_self_attn_mask.stop_gradient = True
    param_initializer = fluid.initializer.TruncatedNormal(config['initializer_range'])
    # Input representation = word + position + sentence-type embeddings.
    emb_out = fluid.layers.embedding(
        input=src_ids,
        size=[config['vocab_size'], config['hidden_size']],
        dtype="float32",
        param_attr=fluid.ParamAttr(name="word_embedding", initializer=param_initializer), is_sparse=False)
    position_emb_out = fluid.layers.embedding(
        input=pos_ids,
        size=[config['max_position_embeddings'], config['hidden_size']],
        dtype="float32",
        param_attr=fluid.ParamAttr(name="pos_embedding", initializer=param_initializer))
    sent_emb_out = fluid.layers.embedding(
        sent_ids,
        size=[config['type_vocab_size'], config['hidden_size']],
        dtype="float32",
        param_attr=fluid.ParamAttr(name="sent_embedding", initializer=param_initializer))
    emb_out += position_emb_out + sent_emb_out
    emb_out = pre_process_layer(emb_out, 'nd', config['hidden_dropout_prob'], name='pre_encoder')
    # Transformer encoder stack configured entirely from `config`.
    enc_out = encoder(
        n_layer=config['num_hidden_layers'],
        enc_input=emb_out,
        attn_bias=n_head_self_attn_mask,
        n_head=config['num_attention_heads'],
        d_key=config['hidden_size'] // config['num_attention_heads'],
        d_value=config['hidden_size'] // config['num_attention_heads'],
        d_model=config['hidden_size'],
        d_inner_hid=config['hidden_size'] * 4,
        prepostprocess_dropout=config['hidden_dropout_prob'],
        attention_dropout=config['attention_probs_dropout_prob'],
        relu_dropout=0,
        hidden_act=config['hidden_act'],
        preprocess_cmd="",
        postprocess_cmd="dan",
        param_initializer=param_initializer,
        name='encoder')
    # Per-token classification head over the label set.
    log = fluid.layers.fc(input=enc_out, size=len(label_map_config), num_flatten_dims=2,
                          param_attr=fluid.ParamAttr(name="cls_seq_label_out_w",
                                                     initializer=fluid.initializer.TruncatedNormal(scale=0.02)),
                          bias_attr=fluid.ParamAttr(name="cls_seq_label_out_b",
                                                    initializer=fluid.initializer.Constant(0.)))
    # Flatten labels and argmax predictions to shape (-1, 1) for fetching.
    ret_labels = fluid.layers.reshape(x=labels, shape=[-1, 1])
    ret_infers = fluid.layers.reshape(x=fluid.layers.argmax(log, axis=2), shape=[-1, 1])
    graph_vars = {"labels": ret_labels, "infers": ret_infers}
    # Mark outputs persistable so they survive across executor runs.
    for v in graph_vars.values():
        v.persistable = True
    return reader, graph_vars
def match(words, init_st: list):
    """Extract entities from *words*.

    :param words: the text to run extraction on
    :param init_st: initialisation state as returned by st()
    :return: a list of label ids as configured in label_map_config
    """
    executor, program, reader, graph_vars = init_st
    reader.decorate_tensor_provider(data_generator(words))
    return evaluate(executor, program, reader, graph_vars)
def st(new_model_path=None, new_config=None, new_label_map_config=None) -> list:
    """Initialise the model; this only needs to run once.

    :param new_model_path: checkpoint directory
    :param new_config: model configuration parameters
    :param new_label_map_config: entity label mapping
    :return: ``[executor, program, reader, graph_vars]`` for use with match()
    """
    global model_path, config, label_map_config
    # Truthy overrides replace the module-level defaults.
    if new_model_path:
        model_path = new_model_path
    if new_config:
        config = new_config
    if new_label_map_config:
        label_map_config = new_label_map_config
    executor = fluid.Executor(fluid.CPUPlace())
    startup_prog = fluid.Program()
    infer_prog = fluid.Program()
    with fluid.program_guard(infer_prog, startup_prog):
        with fluid.unique_name.guard():
            data_reader, graph_vars = create_model()
    infer_prog = infer_prog.clone(for_test=True)
    executor.run(startup_prog)
    init_checkpoint(executor, model_path, main_program=startup_prog)
    return [executor, infer_prog, data_reader, graph_vars]
if __name__ == '__main__':
    # Default model checkpoint path and label mapping; smoke-tests the
    # pipeline on a sample sentence and prints the extracted label ids.
    ERNIE_MODEL_PATH = 'D://model'
    s = st(ERNIE_MODEL_PATH)
    print(match('我叫刘万光我是贵阳市南明村永乐乡水塘村的村民', s))
| [
"jtyoui@qq.com"
] | jtyoui@qq.com |
81d7753dba35d789f59cee9ca7f6a30e69866f6e | c868d681415d152ba331bd80e0ed542832f20f0e | /week 1/Informatics/2. conditional_operators/b.py | 77bce2b9a69e5fecf01a19fc95c2c9b934653cf9 | [] | no_license | Yeldarmt/BFDjango | a297a6b0c00ffb1a269f05c7e6665c5d34a51097 | b8256ff1d5f2125495df66eabf267fc17e667aeb | refs/heads/master | 2022-11-30T12:45:17.356453 | 2020-04-19T16:50:26 | 2020-04-19T16:50:26 | 233,515,749 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85 | py | n = int(input())
# Gregorian leap-year rule: divisible by 4 and not by 100, or divisible by 400.
is_leap = n % 400 == 0 or (n % 4 == 0 and n % 100 != 0)
print("YES" if is_leap else "NO")
"eldarmukhametkazin@gmail.com"
] | eldarmukhametkazin@gmail.com |
d26161caf55999dd250bc6d91896d5ba2425d3d6 | 09912a852e0e20d6a475ef904724f80072a68359 | /eds/FrontEnd/server/openmtc-ngsi/lib/futile/string/__init__.py | bb3f869e1ce8737c0fb47dcff6b960676b61f2db | [
"Apache-2.0"
] | permissive | elastest/elastest-device-emulator-service | 034aa19438383df0975bf86d49e231342d63002f | f512355c5fde6bf027d23558e256b96e2296e0f2 | refs/heads/master | 2021-03-09T15:13:30.676138 | 2020-01-13T12:02:02 | 2020-01-13T12:02:02 | 91,440,225 | 3 | 9 | Apache-2.0 | 2018-12-03T14:59:27 | 2017-05-16T09:26:10 | Python | UTF-8 | Python | false | false | 414 | py | import string
# Characters permitted after the first position of an identifier.
# NOTE(review): string.letters is Python 2-only (and locale-dependent);
# under Python 3 this must become string.ascii_letters -- TODO confirm target version.
letters_digits_underscore = string.letters + string.digits + "_"
class InvalidIdentifier(ValueError):
    """Raised when a string fails identifier validation."""
def is_identifier(s):
    """Return True if *s* is a valid identifier.

    A valid identifier is a non-empty string starting with an ASCII letter,
    followed only by ASCII letters, digits or underscores.

    NOTE: the original used ``string.letters``, which was removed in
    Python 3 and is locale-dependent in Python 2; ``string.ascii_letters``
    is the portable equivalent.
    """
    if not s or s[0] not in string.ascii_letters:
        return False
    allowed = string.ascii_letters + string.digits + "_"
    return all(c in allowed for c in s)
def check_identifier(s):
    """Raise InvalidIdentifier unless *s* is a valid identifier."""
    if is_identifier(s):
        return
    raise InvalidIdentifier(s)
| [
"sro"
] | sro |
d500b52694b7a8b3507cac32ae78b4c9b63e125c | 21e7753732296bfdfb6dd9a9b58c7c6b8d90a1e5 | /ArraysAndStrings/zSearch/zSearch_test.py | 8e6103cc3afa695d7c5b2498d27d8054250359c9 | [] | no_license | rongfeng-china/python-algorithms-and-data-structures | eb8514b44d7ff97dd7c4deda2d8ea888a5aa8d04 | a69241bb7b684bc7d00acdd46c2fc214f7b61887 | refs/heads/master | 2020-03-13T09:08:13.375870 | 2015-12-11T07:37:30 | 2015-12-11T07:37:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | from zSearch import search
import pytest
def test_zSearch():
    """search() finds 'aab' at index 1 inside 'baabaa'."""
    haystack = 'baabaa'
    needle = 'aab'
    assert search(haystack, needle) == 1
| [
"prathamt@outlook.com"
] | prathamt@outlook.com |
1ca9d2844fd7f25bef3d7f66f59a24fc318cab89 | 8da9d3c3e769ead17f5ad4a4cba6fb3e84a9e340 | /src/chila/codexGtk/base/app/connectors/gen/ConnectionPerformer.py | a1ae9d34245a5cb3b3994f97850b732c8cbcaa19 | [] | no_license | blockspacer/chila | 6884a540fafa73db37f2bf0117410c33044adbcf | b95290725b54696f7cefc1c430582f90542b1dec | refs/heads/master | 2021-06-05T10:22:53.536352 | 2016-08-24T15:07:49 | 2016-08-24T15:07:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 618 | py | #!/usr/bin/env python
# Ordered (connector class name, connection path) pairs -- presumably the
# order in which the app's connectors are instantiated and wired; verify
# against the generated connector code that consumes this table.
connInstanceSeq = [ ['Application', 'base.application'],
                    ['MainWindow', 'base.mainWindow'],
                    ['FileChooser', 'base.fileChooser.open'],
                    ['FileChooser', 'base.fileChooser.saveAs'],
                    ['ActionExecuter', 'base.actionExecuter'],
                    ['CmdNetServer', 'base.appCommand.netServer'],
                    ['FunDynExecuter', 'base.appCommand.funDynExecuter'],
                    ['FunDynXMLParser', 'base.appCommand.funDynXMLParser'],
                    ['CmdNetTextMsgCreator', 'base.appCommand.msgCreator'] ]
| [
"chilabot@gmail.com"
] | chilabot@gmail.com |
1d03ea638ca2af8b958398f4ea023c74b7a6fa67 | dfb6a80dda5882a1c2be87b0b6e1e7a87a7b4c20 | /test/test_task_status.py | 5a10bfb8adbc5c54b1f995ceca7541743c5b7690 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | artikcloud/artikcloud-python | a090201bea9fadbdf5dd57d94d9085b03b34f927 | c5489b2fca27fd9a8bcea99f309e02cb690dd349 | refs/heads/master | 2020-12-26T03:33:00.657575 | 2017-12-28T20:40:05 | 2017-12-28T20:40:05 | 55,102,598 | 13 | 11 | null | 2017-03-18T03:22:58 | 2016-03-30T22:38:07 | Python | UTF-8 | Python | false | false | 793 | py | # coding: utf-8
"""
ARTIK Cloud API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import artikcloud
from artikcloud.rest import ApiException
from artikcloud.models.task_status import TaskStatus
class TestTaskStatus(unittest.TestCase):
    """ TaskStatus unit test stubs """

    def setUp(self):
        # No fixtures required for these generated stubs.
        pass

    def tearDown(self):
        pass

    def testTaskStatus(self):
        """
        Test TaskStatus
        """
        # Smoke test: the generated model can be constructed with no arguments.
        model = artikcloud.models.task_status.TaskStatus()
if __name__ == '__main__':
    # Allow running this test module directly via the unittest runner.
    unittest.main()
| [
"jswattonjue@gmail.com"
] | jswattonjue@gmail.com |
548e856a0fa3281825d8b21721b32cb264401c73 | af3ff734866bca30f1821976649d197457f6f66a | /corpus_builder/spiders/newspaper/bhorerkagoj.py | 6f54a1479df963016770b2dd2f87afbaa691e9b6 | [
"MIT"
] | permissive | anwar03/corpus-builder | e69b26e3985ffdc27970117825069b9df2e4387e | 2c3e0d39a0a3dabacda0335d18779a0647bad118 | refs/heads/master | 2022-12-08T21:31:16.051904 | 2020-09-01T08:35:14 | 2020-09-01T08:35:14 | 280,804,773 | 0 | 0 | null | 2020-07-19T06:15:28 | 2020-07-19T06:15:27 | null | UTF-8 | Python | false | false | 1,897 | py | # -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from corpus_builder.items import TextEntry
from corpus_builder.templates.spider import CommonSpider
class BhorerkagojSpider(CommonSpider):
    """Crawl spider for the Bhorer Kagoj newspaper site.

    Walks category index pages between start_page and end_page and follows
    dated article URLs (``/YYYY/MM/DD/<id>.php``) into parse_content.
    """
    name = 'bhorerkagoj'
    allowed_domains = ['bhorerkagoj.net']
    base_url = 'http://www.bhorerkagoj.net' + '/online'
    start_request_url = base_url
    # CSS selector used by the CommonSpider template to extract article text.
    content_body = {
        'css': 'div.entry p::text'
    }
    # Accepted combinations of crawl parameters.
    allowed_configurations = [
        ['start_page'],
        ['start_page', 'end_page'],
        ['category', 'start_page'],
        ['category', 'start_page', 'end_page']
    ]
    # Follow links shaped like /YYYY/MM/DD/<id>.php into parse_content.
    rules = (
        Rule(LinkExtractor(
            allow='\/\d{4}\/\d{2}\/\d{2}\/\d+\.php$'
        ),
        callback='parse_content'),
    )

    def request_index(self, response):
        """Request every category/page index in the configured range.

        With no category configured, all category slugs found in the nav
        list are crawled; an unknown slug raises ValueError listing the
        valid ones.
        """
        categories = []
        if not self.category:
            categories = list(set(response.css('#navcatlist a::attr("href")').re('(?<=category/).*')))
        else:
            categories = response.css('#navcatlist a::attr("href")').re('category/{0}'.format(self.category))
            if not categories:
                # Re-scrape all slugs only to build the error message.
                categories = list(set(response.css('#navcatlist a::attr("href")').re('(?<=category/).*')))
                raise ValueError('invalid category slug. available slugs: \'%s\'' % "', '".join(categories))
        for category in categories:
            for page in range(self.start_page, self.end_page + 1):
                yield scrapy.Request(self.base_url + '/category/' + category + '/page/{0}'.format(str(page)),
                                     callback=self.start_news_requests)

    def start_news_requests(self, response):
        """Fan out a request per unique article link on an index page."""
        news_links = list(set(response.css('.news-box h3 a::attr("href")').extract()))
        for link in news_links:
            # NOTE(review): make_requests_from_url is deprecated in newer
            # Scrapy releases -- consider scrapy.Request(link, dont_filter=True).
            yield self.make_requests_from_url(link)
| [
"aniruddha@adhikary.net"
] | aniruddha@adhikary.net |
9231f3551e55d893656a5014bb2189412903f81d | 78d35bb7876a3460d4398e1cb3554b06e36c720a | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_09_01/operations/_route_filter_rules_operations.py | a484f033d36918f0c61ec5e82a5ba016d1bbadae | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | catchsrinivas/azure-sdk-for-python | e35f59b60318a31b3c940a7a3a07b61b28118aa5 | 596227a7738a5342274486e30489239d539b11d1 | refs/heads/main | 2023-08-27T09:08:07.986249 | 2021-11-11T11:13:35 | 2021-11-11T11:13:35 | 427,045,896 | 0 | 0 | MIT | 2021-11-11T15:14:31 | 2021-11-11T15:14:31 | null | UTF-8 | Python | false | false | 21,968 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class RouteFilterRulesOperations(object):
    """RouteFilterRulesOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2019_09_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def _delete_initial(
        self,
        resource_group_name,  # type: str
        route_filter_name,  # type: str
        rule_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        # Initial DELETE call of the long-running operation; begin_delete
        # polls on the response this produces.
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-09-01"
        accept = "application/json"

        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
            'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'}  # type: ignore

    def begin_delete(
        self,
        resource_group_name,  # type: str
        route_filter_name,  # type: str
        rule_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes the specified rule from a route filter.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param route_filter_name: The name of the route filter.
        :type route_filter_name: str
        :param rule_name: The name of the rule.
        :type rule_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only fire the initial request when not resuming from a saved token.
        if cont_token is None:
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                route_filter_name=route_filter_name,
                rule_name=rule_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Delete has no body to deserialize; only invoke the custom cls hook.
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
            'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        # Final state of this LRO is read from the 'location' header.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'}  # type: ignore

    def get(
        self,
        resource_group_name,  # type: str
        route_filter_name,  # type: str
        rule_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.RouteFilterRule"
        """Gets the specified rule from a route filter.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param route_filter_name: The name of the route filter.
        :type route_filter_name: str
        :param rule_name: The name of the rule.
        :type rule_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: RouteFilterRule, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2019_09_01.models.RouteFilterRule
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.RouteFilterRule"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-09-01"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
            'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('RouteFilterRule', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'}  # type: ignore

    def _create_or_update_initial(
        self,
        resource_group_name,  # type: str
        route_filter_name,  # type: str
        rule_name,  # type: str
        route_filter_rule_parameters,  # type: "_models.RouteFilterRule"
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.RouteFilterRule"
        # Initial PUT call of the long-running create/update operation;
        # begin_create_or_update polls on the response this produces.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.RouteFilterRule"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-09-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
            'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Serialize the rule model as the PUT request body.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(route_filter_rule_parameters, 'RouteFilterRule')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # 200 = updated existing rule, 201 = created new rule; same body shape.
        if response.status_code == 200:
            deserialized = self._deserialize('RouteFilterRule', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('RouteFilterRule', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'}  # type: ignore

    def begin_create_or_update(
        self,
        resource_group_name,  # type: str
        route_filter_name,  # type: str
        rule_name,  # type: str
        route_filter_rule_parameters,  # type: "_models.RouteFilterRule"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.RouteFilterRule"]
        """Creates or updates a route in the specified route filter.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param route_filter_name: The name of the route filter.
        :type route_filter_name: str
        :param rule_name: The name of the route filter rule.
        :type rule_name: str
        :param route_filter_rule_parameters: Parameters supplied to the create or update route filter
         rule operation.
        :type route_filter_rule_parameters: ~azure.mgmt.network.v2019_09_01.models.RouteFilterRule
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either RouteFilterRule or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_09_01.models.RouteFilterRule]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.RouteFilterRule"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only fire the initial request when not resuming from a saved token.
        if cont_token is None:
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                route_filter_name=route_filter_name,
                rule_name=rule_name,
                route_filter_rule_parameters=route_filter_rule_parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('RouteFilterRule', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
            'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        # Final state of this LRO is read via the Azure-AsyncOperation header.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'}  # type: ignore

    def list_by_route_filter(
        self,
        resource_group_name,  # type: str
        route_filter_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.RouteFilterRuleListResult"]
        """Gets all RouteFilterRules in a route filter.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param route_filter_name: The name of the route filter.
        :type route_filter_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either RouteFilterRuleListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_09_01.models.RouteFilterRuleListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.RouteFilterRuleListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-09-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build the first request from the operation URL, or follow the
            # server-provided next_link verbatim for subsequent pages.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list_by_route_filter.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and return (next page link, items iterator).
            deserialized = self._deserialize('RouteFilterRuleListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch one page, raising mapped errors on non-200 responses.
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list_by_route_filter.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules'}  # type: ignore
| [
"noreply@github.com"
] | catchsrinivas.noreply@github.com |
35715b56113b406fe9d97f7c6d4013cdd75747a5 | 6c2dbc8d4e536220fb3b1cc72aa8104aea8b0698 | /aiogram/methods/copy_message.py | b707580e2d3e903bc6c6470534da52b4c04b5823 | [
"MIT"
] | permissive | aiogram/aiogram | f8f98a0beb63bd4d93ea810638d5792569bf354b | 04bd0c9e7c5421c060183b90d515050f41377bc1 | refs/heads/dev-3.x | 2023-08-30T21:20:13.018174 | 2023-08-28T23:01:54 | 2023-08-28T23:01:54 | 111,210,856 | 4,287 | 1,250 | MIT | 2023-09-10T21:34:03 | 2017-11-18T14:11:13 | Python | UTF-8 | Python | false | false | 5,003 | py | from __future__ import annotations
from typing import TYPE_CHECKING, Any, List, Optional, Union
from ..types import (
UNSET_PARSE_MODE,
ForceReply,
InlineKeyboardMarkup,
MessageEntity,
MessageId,
ReplyKeyboardMarkup,
ReplyKeyboardRemove,
)
from ..types.base import UNSET_PROTECT_CONTENT
from .base import TelegramMethod
class CopyMessage(TelegramMethod[MessageId]):
    """
    Use this method to copy messages of any kind. Service messages and invoice messages can't be copied. A quiz :class:`aiogram.methods.poll.Poll` can be copied only if the value of the field *correct_option_id* is known to the bot. The method is analogous to the method :class:`aiogram.methods.forward_message.ForwardMessage`, but the copied message doesn't have a link to the original message. Returns the :class:`aiogram.types.message_id.MessageId` of the sent message on success.

    Source: https://core.telegram.org/bots/api#copymessage
    """

    # Type deserialized from the Bot API response, and the wire method name.
    __returning__ = MessageId
    __api_method__ = "copyMessage"

    chat_id: Union[int, str]
    """Unique identifier for the target chat or username of the target channel (in the format :code:`@channelusername`)"""
    from_chat_id: Union[int, str]
    """Unique identifier for the chat where the original message was sent (or channel username in the format :code:`@channelusername`)"""
    message_id: int
    """Message identifier in the chat specified in *from_chat_id*"""
    message_thread_id: Optional[int] = None
    """Unique identifier for the target message thread (topic) of the forum; for forum supergroups only"""
    caption: Optional[str] = None
    """New caption for media, 0-1024 characters after entities parsing. If not specified, the original caption is kept"""
    # UNSET_* sentinels let the framework substitute bot-level defaults later.
    parse_mode: Optional[str] = UNSET_PARSE_MODE
    """Mode for parsing entities in the new caption. See `formatting options <https://core.telegram.org/bots/api#formatting-options>`_ for more details."""
    caption_entities: Optional[List[MessageEntity]] = None
    """A JSON-serialized list of special entities that appear in the new caption, which can be specified instead of *parse_mode*"""
    disable_notification: Optional[bool] = None
    """Sends the message `silently <https://telegram.org/blog/channels-2-0#silent-messages>`_. Users will receive a notification with no sound."""
    protect_content: Optional[bool] = UNSET_PROTECT_CONTENT
    """Protects the contents of the sent message from forwarding and saving"""
    reply_to_message_id: Optional[int] = None
    """If the message is a reply, ID of the original message"""
    allow_sending_without_reply: Optional[bool] = None
    """Pass :code:`True` if the message should be sent even if the specified replied-to message is not found"""
    reply_markup: Optional[
        Union[InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply]
    ] = None
    """Additional interface options. A JSON-serialized object for an `inline keyboard <https://core.telegram.org/bots/features#inline-keyboards>`_, `custom reply keyboard <https://core.telegram.org/bots/features#keyboards>`_, instructions to remove reply keyboard or to force a reply from the user."""

    if TYPE_CHECKING:
        # DO NOT EDIT MANUALLY!!!
        # This section was auto-generated via `butcher`

        def __init__(
            __pydantic__self__,
            *,
            chat_id: Union[int, str],
            from_chat_id: Union[int, str],
            message_id: int,
            message_thread_id: Optional[int] = None,
            caption: Optional[str] = None,
            parse_mode: Optional[str] = UNSET_PARSE_MODE,
            caption_entities: Optional[List[MessageEntity]] = None,
            disable_notification: Optional[bool] = None,
            protect_content: Optional[bool] = UNSET_PROTECT_CONTENT,
            reply_to_message_id: Optional[int] = None,
            allow_sending_without_reply: Optional[bool] = None,
            reply_markup: Optional[
                Union[InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply]
            ] = None,
            **__pydantic_kwargs: Any,
        ) -> None:
            # DO NOT EDIT MANUALLY!!!
            # This method was auto-generated via `butcher`
            # Is needed only for type checking and IDE support without any additional plugins

            super().__init__(
                chat_id=chat_id,
                from_chat_id=from_chat_id,
                message_id=message_id,
                message_thread_id=message_thread_id,
                caption=caption,
                parse_mode=parse_mode,
                caption_entities=caption_entities,
                disable_notification=disable_notification,
                protect_content=protect_content,
                reply_to_message_id=reply_to_message_id,
                allow_sending_without_reply=allow_sending_without_reply,
                reply_markup=reply_markup,
                **__pydantic_kwargs,
            )
| [
"noreply@github.com"
] | aiogram.noreply@github.com |
34f4866b56d43fe79f5e293b9d8e3990e176d8a2 | c3015c988450974e1ab5ea71eac4baf921d9bde7 | /Day_10/2_combination_sum_II.py | fba8d99e8b244e5f2dae28c6bd98d51c8253cc1b | [] | no_license | anoubhav/30-Day-SDE-Challenge | ce758c450e32d0c353e7ba00aaaf37771fe69d32 | 175edd6aba8a8b45482f485dddfafa43c267246c | refs/heads/master | 2023-01-20T09:54:26.685272 | 2020-11-23T18:39:40 | 2020-11-23T18:39:40 | 286,107,830 | 3 | 2 | null | 2020-10-01T16:51:49 | 2020-08-08T19:35:50 | Python | UTF-8 | Python | false | false | 1,301 | py | # Q: https://leetcode.com/problems/combination-sum-ii/
def combSumBackTrackDFS(candidates, target):
    """Return all unique combinations of `candidates` that sum to `target`.

    Each candidate may be used at most once.  Duplicate values in the input
    are handled by sorting first and skipping repeated values at the same
    recursion depth, so no combination appears twice in the result.
    Worst case O(2^N) over the N candidates (include/exclude each one).
    """
    candidates.sort()  # sorting enables both early pruning and dedup
    n = len(candidates)
    results = []

    def dfs(remaining, chosen, start):
        if remaining == 0:
            results.append(list(chosen))
            return
        for i in range(start, n):
            value = candidates[i]
            if value > remaining:
                # Sorted input: every later candidate is at least as large,
                # so nothing further down this row can fit.
                break
            if i > start and candidates[i] == candidates[i - 1]:
                # Same value already explored at this depth; skip duplicate.
                continue
            # Each number may be used once, hence the search resumes at i + 1.
            dfs(remaining - value, chosen + [value], i + 1)

    dfs(target, [], 0)
    return results
# Example inputs from the problem statement (second pair kept for quick testing).
candidates = [10, 1, 2, 7, 6, 1, 5]
target = 8
# candidates = [2,5,2,1,2]
# target = 5
print(combSumBackTrackDFS(candidates, target))
# Fix: removed the stray no-op expression statement `1, 2, 2, 2, 5` that was
# left over from debugging — it built and discarded a tuple on every run.
"anoubhav.agarwaal@gmail.com"
] | anoubhav.agarwaal@gmail.com |
13b0b204500bf8394736ff1df09efa7565a20da4 | af55d33a21cda3e3b9fe10224105eef9f97ad8ec | /saseumn/util.py | 864b43487ac5e322bf4c6226fc1b59b7603e91b7 | [
"MIT"
] | permissive | saseumn/website | 7a06a4a6abafe7da0c5afb0fc4ba2226ab7ce0d7 | 34790dc7db0f7bbb1736761c5738e4d74066f518 | refs/heads/master | 2021-01-01T17:24:49.013633 | 2019-04-03T18:06:32 | 2019-04-03T18:06:32 | 98,065,352 | 3 | 1 | MIT | 2019-03-30T23:12:10 | 2017-07-23T00:26:26 | HTML | UTF-8 | Python | false | false | 2,246 | py | import logging
import random
import re
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from functools import wraps
import hashlib
from urllib.parse import urljoin, urlparse
from flask import abort, flash, redirect, request, url_for
from flask_login import current_user
from saseumn.config import Config
VALID_USERNAME = re.compile(r"^[A-Za-z_][A-Za-z\d_]*$")
# decorators
def admin_required(f):
    """View decorator: allow access only to authenticated admin users.

    Non-admins get a flashed warning and a redirect to the site index
    instead of the wrapped view.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        if not (current_user.is_authenticated and current_user.admin):
            flash("You don't have permission to view this page.", "danger")
            return redirect(url_for("base.index"))
        return f(*args, **kwargs)
    return wrapper
def random_string(length=32, alpha="0123456789abcdef"):
    """Generate a random string of `length` characters drawn from `alpha`.

    Bug fix: the default alphabet previously read "012346789abcdef" — the
    digit '5' was missing, so default "hex" strings could never contain it.

    NOTE(review): this uses the `random` module, which is not
    cryptographically secure; prefer the `secrets` module for any
    security-sensitive token.
    """
    return "".join(random.choice(alpha) for _ in range(length))
def is_safe_url(target):
    """Return True if `target` resolves to an http(s) URL on this host.

    Resolving relative to `request.host_url` and comparing netlocs guards
    against open-redirect attacks via attacker-supplied targets.
    """
    ref_url = urlparse(request.host_url)
    test_url = urlparse(urljoin(request.host_url, target))
    return test_url.scheme in ("http", "https") and ref_url.netloc == test_url.netloc
def get_redirect_target():
    """Return the first safe redirect target among ?next= and the referrer.

    Falls through (returns None implicitly) when neither candidate is
    present and same-host.
    """
    for target in request.values.get("next"), request.referrer:
        if not target:
            continue
        if is_safe_url(target):
            return target
def redirect_back(endpoint, **values):
    """Redirect to the form's `next` URL, falling back to `endpoint`.

    Only same-host targets are honoured (via is_safe_url); anything else
    falls back to url_for(endpoint, **values).
    """
    target = request.form.get("next", url_for("users.profile"))
    if not target or not is_safe_url(target):
        target = url_for(endpoint, **values)
    return redirect(target)
def hash_file(file, algorithm=hashlib.sha256):
    """Return the hex digest of the remaining bytes of an open file-like object.

    `algorithm` is any hashlib constructor (default SHA-256).  Note the whole
    content is read into memory in one call.
    """
    return algorithm(file.read()).hexdigest()
def send_email(recipient, subject, body, from_addr="example@exmaple.org"):
    """Send a plain-text email to `recipient` through Gmail's SMTP relay.

    Silently does nothing when no credentials are configured.

    Fixes:
    - credentials are now checked *before* opening the SMTP connection, so a
      misconfigured deployment no longer opens (and leaked) a socket;
    - the connection is always closed via try/finally (it was never quit).

    NOTE(review): the default `from_addr` domain "exmaple.org" looks like a
    typo for "example.org" — left unchanged because it is a runtime default.
    """
    credentials = Config.get_email_credentials()
    if not credentials:
        return
    server = smtplib.SMTP("smtp.gmail.com", 587)
    try:
        server.starttls()
        server.login(*credentials)
        msg = MIMEMultipart()
        msg["From"] = from_addr
        msg["To"] = recipient
        msg["Subject"] = subject
        msg.attach(MIMEText(body, "plain"))
        server.sendmail(from_addr, recipient, msg.as_string())
    finally:
        server.quit()
| [
"failed.down@gmail.com"
] | failed.down@gmail.com |
e50d06dfd3630d296a367aa92bf4288a9c9d0649 | 2a2e503746bd2118047d830a3c2b1910ea7a7b0a | /第七章/1.py | 5efb312df1717a83226bf2a913c2d4fd5afedf0a | [] | no_license | hc973591409/python-auto | dabd1bfae7a5f24b49e25bd855ef8456494aa2b8 | c4182d81693ea93d27965c261ad7dffce2cd180a | refs/heads/master | 2020-04-01T10:53:23.490608 | 2018-10-16T12:58:30 | 2018-10-16T12:58:30 | 153,136,118 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 916 | py | ?匹配零次或一次前面的分组。可以关闭贪婪模式
*匹配零次或多次前面的分组。
+匹配一次或多次前面的分组。
{n}匹配 n 次前面的分组。
{n,}匹配 n 次或更多前面的分组。
{,m}匹配零次到 m 次前面的分组。
{n,m}匹配至少 n 次、至多 m 次前面的分组。
{n,m}?或*?或+?对前面的分组进行非贪心匹配。
^spam 意味着字符串必须以 spam 开始。
spam$意味着字符串必须以 spam 结束。
.匹配所有字符,换行符除外。
\d、 \w 和\s 分别匹配数字、单词和空格。
\D、 \W 和\S 分别匹配除数字、单词和空格外的所有字符。
[abc]匹配方括号内的任意字符(诸如 a、 b 或 c)。
[^abc]匹配不在方括号内的任意字符。
# Case-insensitive regex object: re.I makes the compiled pattern ignore case.
robocop = re.compile(r'robocop', re.I)
"973591409@qq.com"
] | 973591409@qq.com |
2b8ae9922a31196bcf079263540885dd6b9a5baf | c440ae324c8d5487679b066b62e64176487b4f6a | /mysite/mysite/views.py | 19b9b86888396084e221b42691a1a5db89fabcd3 | [] | no_license | gangyou/python_execrise | d19eef8acf9e6565e56b27204184ca018d0e7712 | 32afdd9b45a0ecc6c966471bda0d3e03ac632aea | refs/heads/master | 2021-01-23T05:35:21.659681 | 2014-02-20T01:24:26 | 2014-02-20T01:24:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 900 | py | from django.http import HttpResponse
from django.shortcuts import render_to_response
import datetime
import MySQLdb
def hello(request):
    """Trivial view: respond with a plain 'Hello World' body."""
    return HttpResponse("Hello World")
def current_datetime(request):
    """Render the current server time into the current_datetime template."""
    now = datetime.datetime.now()
    return render_to_response('dateapp/current_datetime.html', {'current_date': now})
def hours_ahead(request, offset):
    """Render a page showing the time `offset` hours from now.

    Raises Http404 when `offset` cannot be parsed as an integer.

    Fix: Http404 was referenced here without ever being imported, so the
    error path raised NameError (HTTP 500) instead of returning a 404.
    The import is added at the top of the module.
    """
    try:
        offset = int(offset)
    except ValueError:
        raise Http404()
    next_time = datetime.datetime.now() + datetime.timedelta(hours=offset)
    # locals() passes request/offset/next_time to the template; kept as-is
    # for backward compatibility with the existing template.
    return render_to_response('dateapp/hours_ahead.html', locals())
def display_meta(request):
    """Render request.META as an HTML table of key/value rows, sorted by key.

    Fix: `request.META.items()` returns a view object on Python 3, which has
    no `.sort()` method — `sorted()` works on both Python 2 and 3.

    NOTE(review): META values are interpolated into HTML without escaping;
    headers are attacker-controlled, so django.utils.html.escape() should be
    applied before this view is exposed publicly.
    """
    values = sorted(request.META.items())
    html = []
    for k, v in values:
        html.append('<tr><td>%s</td><td>%s</td></tr>' % (k, v))
    return HttpResponse('<table>%s</table>' % '\n'.join(html))
def login(request):
if request.method != 'POST':
raise Http404('Only POSTs are allowed')
try:
m = Memeberr | [
"gangyou@gmail.com"
] | gangyou@gmail.com |
cef1ccebbe861a3c2822fb09619b360df1476b15 | 0af55951ccc6da45001bfdc80dad6a9607334849 | /pyvko/aspects/albums.py | d7b7e36da7beb53194a68a3763e99967e169a589 | [
"MIT"
] | permissive | djachenko/pyvko | a1e73095aaa88abc1786f3a1192b3b8ec9dcf85e | 4d2534bd392d073c9ade0ed7c51d021b1d8f6426 | refs/heads/master | 2022-11-14T05:46:30.038235 | 2022-10-08T15:05:31 | 2022-10-08T15:05:31 | 201,685,483 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,264 | py | from abc import abstractmethod, ABC
from pathlib import Path
from typing import Dict, List
from vk import API
from pyvko.api_based import ApiMixin, ApiBased
from pyvko.attachment.attachment import Attachment
from pyvko.attachment.photo import Photo
from pyvko.shared.photos_uploader import AlbumPhotoUploader
from pyvko.shared.utils import get_all
class Album(ApiBased, Attachment):
    """A VK photo album: wraps the photos.* API and doubles as a post attachment."""

    def __init__(self, api: API, api_object: dict) -> None:
        super().__init__(api)
        # Cache the identifying fields from the raw API response.
        self.__name = api_object["title"]
        self.__id = api_object["id"]
        self.__owner_id = api_object["owner_id"]

    @property
    def name(self) -> str:
        return self.__name

    @property
    def id(self) -> int:
        return self.__id

    def get_photos(self) -> List[Photo]:
        """Fetch every photo in this album (paginated via get_all)."""
        parameters = self.get_request()
        photos_descriptions = get_all(parameters, self.api.photos.get)
        photos = [Photo(photo_object) for photo_object in photos_descriptions]
        return photos

    def get_request(self, parameters: Dict = None) -> dict:
        """Extend the base request with this album's owner and album ids."""
        parameters = parameters.copy()
        parameters.update({
            "owner_id": self.__owner_id,
            "album_id": self.__id
        })
        return super().get_request(parameters)

    def set_cover(self, cover: Photo):
        """Make `cover` the album's cover photo."""
        request = self.get_request({
            "photo_id": cover.id
        })
        self.api.photos.makeCover(**request)

    def add_photo(self, path: Path) -> Photo:
        """Upload the image at `path` into this album and return the new Photo."""
        # Uploader expects a group id, hence the sign flip of owner_id.
        uploader = AlbumPhotoUploader(self.api, self.id, -self.__owner_id)
        return uploader.upload(path)

    # region Attachment

    @property
    def type(self) -> str:
        return "album"

    @property
    def owner_id(self) -> int:
        return self.__owner_id

    @property
    def media_id(self) -> int:
        return self.id

    # endregion Attachment
class Albums(ApiMixin, ABC):
    """Mixin granting album listing/creation to any API entity that has an id."""

    @property
    @abstractmethod
    def id(self) -> int:
        # Concrete owner (user/group) supplies its VK id.
        pass

    def __get_albums(self, parameters: Dict = None) -> List[Album]:
        """Fetch albums owned by this entity, optionally filtered by `parameters`."""
        request = self.__get_owned_request(parameters)
        result = self.api.photos.getAlbums(**request)
        albums = [Album(self.api, album_object) for album_object in result["items"]]
        return albums

    def get_all_albums(self) -> List[Album]:
        return self.__get_albums()

    def get_album_by_id(self, album_id: int) -> Album:
        albums_list = self.__get_albums({
            "album_ids": [album_id]
        })
        # Exactly one album must match the requested id.
        assert len(albums_list) == 1
        return albums_list[0]

    def create_album(self, name: str) -> Album:
        """Create an admin-upload-only album named `name` and return it."""
        parameters = {
            "title": name,
            "group_id": abs(self.id),
            "upload_by_admins_only": 1
        }
        parameters = self.get_request(parameters)
        response = self.api.photos.createAlbum(**parameters)
        created_album = Album(self.api, response)
        return created_album

    def __get_owned_request(self, parameters: Dict = None) -> dict:
        """Copy `parameters` and stamp this entity's id as owner_id."""
        if parameters is None:
            parameters = {}
        else:
            parameters = parameters.copy()
        # Callers must not pre-set owner_id; it is derived from self.id.
        assert "owner_id" not in parameters
        parameters.update({
            "owner_id": self.id
        })
        return self.get_request(parameters)
"i.s.djachenko@gmail.com"
] | i.s.djachenko@gmail.com |
81ae7d5d9cb2b61b86e720254a4097c66638524c | 72a58c62d62210e853ef09fdee65bf6ffb8972bd | /src/lib/telegram/utils/webhookhandler.py | 04fc127aaa8840233bc1901e805cff440e5c8e26 | [
"MIT"
] | permissive | thonkify/thonkify | 93ade2489f20fb80c5e8e27fe67b9b231ada62bd | 2cb4493d796746cb46c8519a100ef3ef128a761a | refs/heads/master | 2023-09-01T00:03:10.398583 | 2018-03-16T09:18:24 | 2018-03-16T09:18:24 | 99,354,595 | 17 | 3 | MIT | 2023-09-05T02:27:42 | 2017-08-04T15:10:50 | Python | UTF-8 | Python | false | false | 4,111 | py | import logging
from telegram import Update
from future.utils import bytes_to_native_str
from threading import Lock
try:
import ujson as json
except ImportError:
import json
try:
import BaseHTTPServer
except ImportError:
import http.server as BaseHTTPServer
logging.getLogger(__name__).addHandler(logging.NullHandler())
class _InvalidPost(Exception):
    """Signals a rejected webhook POST; `http_code` is the status to reply with."""

    def __init__(self, http_code):
        super(_InvalidPost, self).__init__()
        self.http_code = http_code
class WebhookServer(BaseHTTPServer.HTTPServer, object):
    """HTTP server that receives Telegram webhook updates and queues them."""

    def __init__(self, server_address, RequestHandlerClass, update_queue, webhook_path, bot):
        super(WebhookServer, self).__init__(server_address, RequestHandlerClass)
        self.logger = logging.getLogger(__name__)
        self.update_queue = update_queue  # parsed Updates are pushed here
        self.webhook_path = webhook_path  # only POSTs to this path are accepted
        self.bot = bot
        self.is_running = False
        # Separate locks so shutdown() cannot deadlock against serve_forever().
        self.server_lock = Lock()
        self.shutdown_lock = Lock()

    def serve_forever(self, poll_interval=0.5):
        """Block and serve requests until shutdown() is called."""
        with self.server_lock:
            self.is_running = True
            self.logger.debug('Webhook Server started.')
            super(WebhookServer, self).serve_forever(poll_interval)
            self.logger.debug('Webhook Server stopped.')

    def shutdown(self):
        """Stop the server; safe to call when it is already stopped."""
        with self.shutdown_lock:
            if not self.is_running:
                # NOTE: logger.warn is a deprecated alias of logger.warning.
                self.logger.warn('Webhook Server already stopped.')
                return
            else:
                super(WebhookServer, self).shutdown()
                self.is_running = False
# WebhookHandler, process webhook calls
# Based on: https://github.com/eternnoir/pyTelegramBotAPI/blob/master/
# examples/webhook_examples/webhook_cpython_echo_bot.py
class WebhookHandler(BaseHTTPServer.BaseHTTPRequestHandler, object):
    """Request handler: validates webhook POSTs and enqueues parsed Updates."""

    server_version = 'WebhookHandler/1.0'

    def __init__(self, request, client_address, server):
        self.logger = logging.getLogger(__name__)
        super(WebhookHandler, self).__init__(request, client_address, server)

    def do_HEAD(self):
        # Health-check style responses: always 200 with no body.
        self.send_response(200)
        self.end_headers()

    def do_GET(self):
        self.send_response(200)
        self.end_headers()

    def do_POST(self):
        """Validate, parse and enqueue one webhook update."""
        self.logger.debug('Webhook triggered')
        try:
            self._validate_post()
            clen = self._get_content_len()
        except _InvalidPost as e:
            # Reply with the status code carried by the validation error.
            self.send_error(e.http_code)
            self.end_headers()
        else:
            buf = self.rfile.read(clen)
            json_string = bytes_to_native_str(buf)
            self.send_response(200)
            self.end_headers()
            self.logger.debug('Webhook received data: ' + json_string)
            update = Update.de_json(json.loads(json_string), self.server.bot)
            self.logger.debug('Received Update with ID %d on Webhook' % update.update_id)
            self.server.update_queue.put(update)

    def _validate_post(self):
        # Accept only JSON POSTs addressed to the configured webhook path.
        if not (self.path == self.server.webhook_path and 'content-type' in self.headers and
                self.headers['content-type'] == 'application/json'):
            raise _InvalidPost(403)

    def _get_content_len(self):
        # 411 Length Required when missing; 403 for malformed/negative values.
        clen = self.headers.get('content-length')
        if clen is None:
            raise _InvalidPost(411)
        try:
            clen = int(clen)
        except ValueError:
            raise _InvalidPost(403)
        if clen < 0:
            raise _InvalidPost(403)
        return clen

    def log_message(self, format, *args):
        """Log an arbitrary message.

        This is used by all other logging functions.

        It overrides ``BaseHTTPRequestHandler.log_message``, which logs to ``sys.stderr``.

        The first argument, FORMAT, is a format string for the message to be logged. If the format
        string contains any % escapes requiring parameters, they should be specified as subsequent
        arguments (it's just like printf!).

        The client ip is prefixed to every message.

        """
        self.logger.debug("%s - - %s" % (self.address_string(), format % args))
| [
"david.dellsperger@gmail.com"
] | david.dellsperger@gmail.com |
ba0dd254252f4869fbf6ee211fed3184068abb40 | 78ef0d7736075ee33ac4230f47c078bbf2b0e014 | /news/tests/factories.py | 260f2c6c40c3bea038e82e97e4b5815de433bef0 | [
"Apache-2.0"
] | permissive | PyAr/pyarweb | e22e9350bf107329e5a79c2368fb182958a134d2 | 5f88d1ea0cea9bd67547b70dc2c8bbaa3b8b9d03 | refs/heads/master | 2023-08-31T10:24:53.220031 | 2023-08-29T16:21:57 | 2023-08-29T16:21:57 | 17,032,696 | 64 | 108 | Apache-2.0 | 2023-09-07T04:02:53 | 2014-02-20T19:28:31 | Python | UTF-8 | Python | false | false | 757 | py | from factory import SubFactory, Sequence, post_generation
from factory.django import DjangoModelFactory
from events.tests.factories import UserFactory
from news.models import NewsArticle
class NewsArticleFactory(DjangoModelFactory):
    """Factory producing NewsArticle fixtures with unique titles."""

    class Meta:
        model = NewsArticle

    owner = SubFactory(UserFactory)
    title = Sequence(lambda n: 'news_title_%i' % n)

    @post_generation
    def set_created(obj, create, extracted, **kwargs):
        """
        Update the creation time of the built instance. As it is an auto-generated field, we must
        set its value after creation.

        To use: NewsArticleFactory(set_created='1985-10-26 09:00Z')
        """
        if extracted:
            obj.created = extracted
            obj.save()
| [
"noreply@github.com"
] | PyAr.noreply@github.com |
bdb367c22ca5a5d0cbfd7aeadad6fc5d05cd73de | 38d2ae7fd3ff660704bfeef51087454e6a52191e | /python/prob433/single/prob433.py | 21ef79f73b7c23492a384f457270093fff40d8f0 | [] | no_license | afraenkel/project-euler | 59e8083d4ab3931957e86231636c19ffbc7153d1 | 7db1869f77ca5e5c18f0537b814df2da8175b288 | refs/heads/master | 2020-04-15T17:30:06.586563 | 2016-08-29T22:38:02 | 2016-08-29T22:38:02 | 40,340,384 | 0 | 0 | null | 2015-11-01T09:10:56 | 2015-08-07T04:03:19 | C | UTF-8 | Python | false | false | 1,217 | py |
import itertools as it
# Let E(x0, y0) be the number of steps it takes to determine
# the greatest common divisor of x0 and y0 with Euclid's algorithm.
# Define S(N) as the sum of E(x,y) for 1 ≤ x,y ≤ N.
# We have S(1) = 1, S(10) = 221 and S(100) = 39826.
# Find S(5·10^6).
def E(a, b):
    """Count the division steps Euclid's algorithm takes to compute gcd(a, b)."""
    steps = 0
    while b != 0:
        a, b = b, a % b
        steps += 1
    return steps
# NOTE(review): this block arrived with all leading indentation stripped; the
# nesting below is reconstructed, and the placement of the trailing
# accumulation lines (from `d += (N-1)` onward) is a best-effort guess —
# verify against S(10) == 221 before trusting the output.
N = 10
# lens yields 1, 1, 2, 2, 3, 3, ... — the lengths of successive diagonals,
# paired below with column numbers 2, 3, 4, ...
lens = (x for k in it.count(1) for x in it.repeat(k,2))
cols = it.count(2)
d = 0
for c,l in zip(cols, lens):
    first_row = 2*c - l
    if first_row > N:
        break
    for r in range(first_row, first_row + l):
        if r > N:
            break
        f = (r-c)
        incr = 0
        while (r+incr) <= N:
            # accumulate step counts for scaled copies of the (r, c) pair
            d += E(r, c)*( (N-c-incr)//(c+incr) )
            incr += f
        d += (N - r + 1) // c
    d += (N-1)
# Exploit symmetry E(x, y) == E(y, x), then add the diagonal/edge terms.
d *= 2
d += (N-1)*N//2
d += N
print(d)
# This starts getting slow at n=1000
# Use the fact that:
# (1) E(a,b) = E(b,a) (obvious)
# (2) E(a,b) = E(ka, kb) for all a,b,k (clear from euclid algo)
# above is not enough
# probably compute a bunch of gcd steps at each step using memoizing
def S(n):
    """Brute-force sum of E(x, y) over all pairs 1 <= x, y <= n.

    O(n^2) gcd computations — reference implementation only; far too slow
    for the target N = 5 * 10**6.
    """
    return sum(E(x, y) for x in range(1, n + 1) for y in range(1, n + 1))
| [
"aaron.fraenkel@gmail.com"
] | aaron.fraenkel@gmail.com |
65b44cc206dd1be264056206b323a537eee6626f | da1dbb0e1c8c323bbf7ba0eac43b5815ce075282 | /python/ccxt/async_support/bitflyer.py | 62c6dcfde8bd614ee090a59065d8e6b399e31a07 | [
"MIT"
] | permissive | alexander-dev-hub/ccxt | d339662d527bdf0d99380c61ccce233c4475d1a1 | eba5dbe98cf106361c45cec9debda3d2722ea878 | refs/heads/master | 2022-07-10T05:03:35.809978 | 2019-09-02T19:10:10 | 2019-09-02T19:10:10 | 205,919,117 | 1 | 1 | MIT | 2022-06-22T15:56:21 | 2019-09-02T19:00:14 | JavaScript | UTF-8 | Python | false | false | 16,724 | py | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import OrderNotFound
class bitflyer (Exchange):
    """ccxt adapter for the bitFlyer exchange (Japan/US/EU markets)."""

    def describe(self):
        """Static exchange metadata: endpoints, capabilities and fee schedule."""
        return self.deep_extend(super(bitflyer, self).describe(), {
            'id': 'bitflyer',
            'name': 'bitFlyer',
            'countries': ['JP'],
            'version': 'v1',
            'rateLimit': 1000,  # their nonce-timestamp is in seconds...
            'has': {
                'CORS': False,
                'withdraw': True,
                'fetchMyTrades': True,
                'fetchOrders': True,
                'fetchOrder': 'emulated',
                'fetchOpenOrders': 'emulated',
                'fetchClosedOrders': 'emulated',
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/28051642-56154182-660e-11e7-9b0d-6042d1e6edd8.jpg',
                'api': 'https://api.bitflyer.jp',
                'www': 'https://bitflyer.jp',
                'doc': 'https://lightning.bitflyer.com/docs?lang=en',
            },
            'api': {
                'public': {
                    'get': [
                        'getmarkets/usa',  # new(wip)
                        'getmarkets/eu',  # new(wip)
                        'getmarkets',  # or 'markets'
                        'getboard',  # ...
                        'getticker',
                        'getexecutions',
                        'gethealth',
                        'getboardstate',
                        'getchats',
                    ],
                },
                'private': {
                    'get': [
                        'getpermissions',
                        'getbalance',
                        'getbalancehistory',
                        'getcollateral',
                        'getcollateralhistory',
                        'getcollateralaccounts',
                        'getaddresses',
                        'getcoinins',
                        'getcoinouts',
                        'getbankaccounts',
                        'getdeposits',
                        'getwithdrawals',
                        'getchildorders',
                        'getparentorders',
                        'getparentorder',
                        'getexecutions',
                        'getpositions',
                        'gettradingcommission',
                    ],
                    'post': [
                        'sendcoin',
                        'withdraw',
                        'sendchildorder',
                        'cancelchildorder',
                        'sendparentorder',
                        'cancelparentorder',
                        'cancelallchildorders',
                    ],
                },
            },
            'fees': {
                'trading': {
                    'maker': 0.2 / 100,
                    'taker': 0.2 / 100,
                },
                'BTC/JPY': {
                    'maker': 0.15 / 100,
                    'taker': 0.15 / 100,
                },
            },
        })

    async def fetch_markets(self, params={}):
        """Merge the JP, US and EU market lists into unified ccxt market dicts."""
        jp_markets = await self.publicGetGetmarkets(params)
        us_markets = await self.publicGetGetmarketsUsa(params)
        eu_markets = await self.publicGetGetmarketsEu(params)
        markets = self.array_concat(jp_markets, us_markets)
        markets = self.array_concat(markets, eu_markets)
        result = []
        for i in range(0, len(markets)):
            market = markets[i]
            id = self.safe_string(market, 'product_code')
            currencies = id.split('_')
            baseId = None
            quoteId = None
            base = None
            quote = None
            numCurrencies = len(currencies)
            if numCurrencies == 1:
                # e.g. "BTCJPY": split the 6-char code into two 3-char ids.
                baseId = id[0:3]
                quoteId = id[3:6]
            elif numCurrencies == 2:
                baseId = currencies[0]
                quoteId = currencies[1]
            else:
                # e.g. futures codes like "FX_BTC_JPY".
                baseId = currencies[1]
                quoteId = currencies[2]
            base = self.safe_currency_code(baseId)
            quote = self.safe_currency_code(quoteId)
            symbol = (base + '/' + quote) if (numCurrencies == 2) else id
            # Per-symbol fee override, falling back to the generic schedule.
            fees = self.safe_value(self.fees, symbol, self.fees['trading'])
            maker = self.safe_value(fees, 'maker', self.fees['trading']['maker'])
            taker = self.safe_value(fees, 'taker', self.fees['trading']['taker'])
            spot = True
            future = False
            type = 'spot'
            if ('alias' in list(market.keys())) or (currencies[0] == 'FX'):
                # Aliased or FX_-prefixed products are futures with zero fees.
                type = 'future'
                future = True
                spot = False
                maker = 0.0
                taker = 0.0
            result.append({
                'id': id,
                'symbol': symbol,
                'base': base,
                'quote': quote,
                'baseId': baseId,
                'quoteId': quoteId,
                'maker': maker,
                'taker': taker,
                'type': type,
                'spot': spot,
                'future': future,
                'info': market,
            })
        return result

    async def fetch_balance(self, params={}):
        """Fetch account balances and map them into the unified balance structure."""
        await self.load_markets()
        response = await self.privateGetGetbalance(params)
        #
        #     [
        #         {
        #             "currency_code": "JPY",
        #             "amount": 1024078,
        #             "available": 508000
        #         },
        #         {
        #             "currency_code": "BTC",
        #             "amount": 10.24,
        #             "available": 4.12
        #         },
        #         {
        #             "currency_code": "ETH",
        #             "amount": 20.48,
        #             "available": 16.38
        #         }
        #     ]
        #
        result = {'info': response}
        for i in range(0, len(response)):
            balance = response[i]
            currencyId = self.safe_string(balance, 'currency_code')
            code = self.safe_currency_code(currencyId)
            account = self.account()
            account['total'] = self.safe_float(balance, 'amount')
            account['free'] = self.safe_float(balance, 'available')
            result[code] = account
        return self.parse_balance(result)

    async def fetch_order_book(self, symbol, limit=None, params={}):
        """Fetch the order book; bitFlyer uses 'size' for amounts."""
        await self.load_markets()
        request = {
            'product_code': self.market_id(symbol),
        }
        orderbook = await self.publicGetGetboard(self.extend(request, params))
        return self.parse_order_book(orderbook, None, 'bids', 'asks', 'price', 'size')

    async def fetch_ticker(self, symbol, params={}):
        """Fetch the ticker; 'ltp' is the last traded price."""
        await self.load_markets()
        request = {
            'product_code': self.market_id(symbol),
        }
        ticker = await self.publicGetGetticker(self.extend(request, params))
        timestamp = self.parse8601(self.safe_string(ticker, 'timestamp'))
        last = self.safe_float(ticker, 'ltp')
        return {
            'symbol': symbol,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'high': None,
            'low': None,
            'bid': self.safe_float(ticker, 'best_bid'),
            'bidVolume': None,
            'ask': self.safe_float(ticker, 'best_ask'),
            'askVolume': None,
            'vwap': None,
            'open': None,
            'close': last,
            'last': last,
            'previousClose': None,
            'change': None,
            'percentage': None,
            'average': None,
            'baseVolume': self.safe_float(ticker, 'volume_by_product'),
            'quoteVolume': None,
            'info': ticker,
        }

    def parse_trade(self, trade, market=None):
        """Normalize a raw execution into the unified trade structure."""
        side = self.safe_string_lower(trade, 'side')
        if side is not None:
            if len(side) < 1:
                side = None
        order = None
        if side is not None:
            # Executions carry the order id under a side-specific key.
            id = side + '_child_order_acceptance_id'
            if id in trade:
                order = trade[id]
        if order is None:
            order = self.safe_string(trade, 'child_order_acceptance_id')
        timestamp = self.parse8601(self.safe_string(trade, 'exec_date'))
        price = self.safe_float(trade, 'price')
        amount = self.safe_float(trade, 'size')
        cost = None
        if amount is not None:
            if price is not None:
                cost = price * amount
        id = self.safe_string(trade, 'id')
        symbol = None
        if market is not None:
            symbol = market['symbol']
        return {
            'id': id,
            'info': trade,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'symbol': symbol,
            'order': order,
            'type': None,
            'side': side,
            'takerOrMaker': None,
            'price': price,
            'amount': amount,
            'cost': cost,
            'fee': None,
        }

    async def fetch_trades(self, symbol, since=None, limit=None, params={}):
        """Fetch public executions for a symbol."""
        await self.load_markets()
        market = self.market(symbol)
        request = {
            'product_code': market['id'],
        }
        response = await self.publicGetGetexecutions(self.extend(request, params))
        return self.parse_trades(response, market, since, limit)

    async def create_order(self, symbol, type, side, amount, price=None, params={}):
        """Place a child order; returns only the acceptance id (bitFlyer limitation)."""
        await self.load_markets()
        request = {
            'product_code': self.market_id(symbol),
            'child_order_type': type.upper(),
            'side': side.upper(),
            'price': price,
            'size': amount,
        }
        result = await self.privatePostSendchildorder(self.extend(request, params))
        # {"status": - 200, "error_message": "Insufficient funds", "data": null}
        id = self.safe_string(result, 'child_order_acceptance_id')
        return {
            'info': result,
            'id': id,
        }

    async def cancel_order(self, id, symbol=None, params={}):
        """Cancel a child order; bitFlyer requires the market's product code."""
        if symbol is None:
            raise ArgumentsRequired(self.id + ' cancelOrder() requires a `symbol` argument')
        await self.load_markets()
        request = {
            'product_code': self.market_id(symbol),
            'child_order_acceptance_id': id,
        }
        return await self.privatePostCancelchildorder(self.extend(request, params))

    def parse_order_status(self, status):
        """Map bitFlyer child order states onto unified order statuses."""
        statuses = {
            'ACTIVE': 'open',
            'COMPLETED': 'closed',
            'CANCELED': 'canceled',
            'EXPIRED': 'canceled',
            'REJECTED': 'canceled',
        }
        return self.safe_string(statuses, status, status)

    def parse_order(self, order, market=None):
        """Normalize a raw child order into the unified order structure."""
        timestamp = self.parse8601(self.safe_string(order, 'child_order_date'))
        amount = self.safe_float(order, 'size')
        remaining = self.safe_float(order, 'outstanding_size')
        filled = self.safe_float(order, 'executed_size')
        price = self.safe_float(order, 'price')
        # NOTE(review): assumes 'price' and 'executed_size' are always present;
        # a missing price (market orders?) would make this multiplication raise.
        cost = price * filled
        status = self.parse_order_status(self.safe_string(order, 'child_order_state'))
        type = self.safe_string_lower(order, 'child_order_type')
        side = self.safe_string_lower(order, 'side')
        symbol = None
        if market is None:
            marketId = self.safe_string(order, 'product_code')
            if marketId in self.markets_by_id:
                market = self.markets_by_id[marketId]
        if market is not None:
            symbol = market['symbol']
        fee = None
        feeCost = self.safe_float(order, 'total_commission')
        if feeCost is not None:
            fee = {
                'cost': feeCost,
                'currency': None,
                'rate': None,
            }
        id = self.safe_string(order, 'child_order_acceptance_id')
        return {
            'id': id,
            'info': order,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'lastTradeTimestamp': None,
            'status': status,
            'symbol': symbol,
            'type': type,
            'side': side,
            'price': price,
            'cost': cost,
            'amount': amount,
            'filled': filled,
            'remaining': remaining,
            'fee': fee,
        }

    async def fetch_orders(self, symbol=None, since=None, limit=100, params={}):
        """Fetch child orders for a symbol (required by the API)."""
        if symbol is None:
            raise ArgumentsRequired(self.id + ' fetchOrders() requires a `symbol` argument')
        await self.load_markets()
        market = self.market(symbol)
        request = {
            'product_code': market['id'],
            'count': limit,
        }
        response = await self.privateGetGetchildorders(self.extend(request, params))
        orders = self.parse_orders(response, market, since, limit)
        if symbol is not None:
            orders = self.filter_by(orders, 'symbol', symbol)
        return orders

    async def fetch_open_orders(self, symbol=None, since=None, limit=100, params={}):
        # Emulated by filtering fetch_orders on the ACTIVE state.
        request = {
            'child_order_state': 'ACTIVE',
        }
        return await self.fetch_orders(symbol, since, limit, self.extend(request, params))

    async def fetch_closed_orders(self, symbol=None, since=None, limit=100, params={}):
        # Emulated by filtering fetch_orders on the COMPLETED state.
        request = {
            'child_order_state': 'COMPLETED',
        }
        return await self.fetch_orders(symbol, since, limit, self.extend(request, params))

    async def fetch_order(self, id, symbol=None, params={}):
        # Emulated: fetch all orders for the symbol and index by id.
        if symbol is None:
            raise ArgumentsRequired(self.id + ' fetchOrder() requires a `symbol` argument')
        orders = await self.fetch_orders(symbol)
        ordersById = self.index_by(orders, 'id')
        if id in ordersById:
            return ordersById[id]
        raise OrderNotFound(self.id + ' No order found with id ' + id)

    async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
        """Fetch the account's own executions for a symbol."""
        if symbol is None:
            raise ArgumentsRequired(self.id + ' fetchMyTrades requires a `symbol` argument')
        await self.load_markets()
        market = self.market(symbol)
        request = {
            'product_code': market['id'],
        }
        if limit is not None:
            request['count'] = limit
        response = await self.privateGetGetexecutions(self.extend(request, params))
        return self.parse_trades(response, market, since, limit)

    async def withdraw(self, code, amount, address, tag=None, params={}):
        """Withdraw fiat (JPY/USD/EUR only) to a pre-registered bank account."""
        self.check_address(address)
        await self.load_markets()
        if code != 'JPY' and code != 'USD' and code != 'EUR':
            raise ExchangeError(self.id + ' allows withdrawing JPY, USD, EUR only, ' + code + ' is not supported')
        currency = self.currency(code)
        request = {
            'currency_code': currency['id'],
            'amount': amount,
            # 'bank_account_id': 1234,
        }
        response = await self.privatePostWithdraw(self.extend(request, params))
        id = self.safe_string(response, 'message_id')
        return {
            'info': response,
            'id': id,
        }

    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the URL, headers and body; private calls are HMAC-signed."""
        request = '/' + self.version + '/'
        if api == 'private':
            request += 'me/'
        request += path
        if method == 'GET':
            if params:
                request += '?' + self.urlencode(params)
        url = self.urls['api'] + request
        if api == 'private':
            self.check_required_credentials()
            nonce = str(self.nonce())
            # Signature covers nonce + HTTP method + path (+ JSON body for non-GET).
            auth = ''.join([nonce, method, request])
            if params:
                if method != 'GET':
                    body = self.json(params)
                    auth += body
            headers = {
                'ACCESS-KEY': self.apiKey,
                'ACCESS-TIMESTAMP': nonce,
                'ACCESS-SIGN': self.hmac(self.encode(auth), self.encode(self.secret)),
                'Content-Type': 'application/json',
            }
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
| [
"wangming19871126@gmail.com"
] | wangming19871126@gmail.com |
a7c06dc93cc7febe105c9c5e02e838264589e283 | bb142df010298fb4fa51d021a869bc99f689541b | /arelle/plugin/security/cryptAES_CBC.py | 47f32f7e35b403f4958f998c77ac43e52b7dc2da | [
"Apache-2.0"
] | permissive | fritzh321/Arelle | 6d7a7363716d52e3bf2f788c43a50de5f84edaa3 | fd585c7a5cef067ae213059bb864c4d32f937eb5 | refs/heads/master | 2020-04-09T22:50:02.115753 | 2018-12-02T16:33:20 | 2018-12-02T16:33:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,957 | py | '''
Created on June 7, 2018
@author: Mark V Systems Limited
(c) Copyright 2018 Mark V Systems Limited, All rights reserved.
Template crypt module using AES CBC mode.
Customize for an integrated security environment
Input file parameters may be in JSON (without newlines for pretty printing as below):
[ {"file": "file path to instance or inline xhtml",
"key": "base 64 encoded key",
"iv": "base 64 encoded iv",
... (any other custom entrypoint parameters)
},
{"file": "file 2"...
]
On Windows, the input file argument must be specially quoted if passed in via Java
due to a Java bug on Windows shell interface (without the newlines for pretty printing below):
"[{\"file\":\"z:\\Documents\\dir\\gpc_gd1-20130930.htm\",
\"key\": \"base 64 encoded key\",
\"iv\": \"base 64 encoded iv\",
... (any other custom entrypoint parameters)
}]"
The ownerObject may be a validation object related to the instance or to a collection of instances.
Customize method of detecting an encrypted file. This example appends "~" to distinguish files which are encrypted.
'''
import os, io, base64
from arelle import FileSource, XmlUtil
AES = None # Cipher.Crypto AES is only imported if an encrypted input is noticed
ENCRYPTED_FILE_SUFFIX = "~" # appended to any file which has been encrypted
def securityInit(ownerObject, options, filesource, entrypointfiles, sourceZipStream):
ownerObject.hasEncryption = False
ownerObject.cipherKey = None
ownerObject.cipherIv = None
def securityFilingStart(ownerObject, options, filesource, entrypointfiles, sourceZipStream):
# check if any files have an encryption key specified, if so activate security
if isinstance(entrypointfiles, list) and any("key" in entrypointfile for entrypointfile in entrypointfiles):
# AES encryption must be installed
global AES
from Crypto.Cipher import AES # must have AES encryption loaded in server
ownerObject.hasEncryption = True
def securityFileSourceExists(ownerObject, filepath):
# handle FileSource existence requests which might involve encrypted files
if ownerObject.hasEncryption and os.path.exists(filepath + ENCRYPTED_FILE_SUFFIX):
return True
return None
def securityFileSourceFile(cntlr, ownerObject, filepath, binary, stripDeclaration):
# handle FileSource file requests which can return encrypted contents
if ownerObject.hasEncryption:
for entrypointfile in ownerObject.entrypointfiles:
if filepath == entrypointfile["file"] and "key" in entrypointfile and "iv" in entrypointfile:
ownerObject.cipherIv = base64.decodebytes(entrypointfile["iv"].encode())
ownerObject.cipherKey = base64.decodebytes(entrypointfile["key"].encode())
break # set new iv, key based on entrypointfiles
# may be a non-entry file (xsd, linkbase, jpg) using entry's iv, key
if os.path.exists(filepath + ENCRYPTED_FILE_SUFFIX) and ownerObject.cipherKey is not None and ownerObject.cipherIv is not None:
encrdata = io.open(filepath + ENCRYPTED_FILE_SUFFIX, "rb").read()
cipher = AES.new(ownerObject.cipherKey, AES.MODE_CBC, iv=ownerObject.cipherIv)
bytesdata = cipher.decrypt(encrdata)
encrdata = None # dereference before decode operation
if binary: # return bytes
return (FileSource.FileNamedBytesIO(filepath, bytesdata[0:-bytesdata[-1]]), ) # trim AES CBC padding
# detect encoding if there is an XML header
encoding = XmlUtil.encoding(bytesdata[0:512],
default=cntlr.modelManager.disclosureSystem.defaultXmlEncoding
if cntlr else 'utf-8')
# return decoded string
text = bytesdata[0:-bytesdata[-1]].decode(encoding or 'utf-8') # trim AES CBC padding and decode
bytesdata = None # dereference before text operation
if stripDeclaration: # file source may strip XML declaration for libxml
xmlDeclarationMatch = FileSource.XMLdeclaration.search(text)
if xmlDeclarationMatch: # remove it for lxml
start,end = xmlDeclarationMatch.span()
text = text[0:start] + text[end:]
return (FileSource.FileNamedStringIO(filepath, initial_value=text), encoding)
return None
def securityWrite(ownerObject, filepath, data):
if ownerObject.hasEncryption and ownerObject.cipherKey is not None and ownerObject.cipherIv is not None:
cipher = AES.new(ownerObject.cipherKey, AES.MODE_CBC, iv=ownerObject.cipherIv)
if isinstance(data, str): # encode string into bytes
bytesdata = data.encode("utf-8")
else: # data is binary, doesn't need encoding
bytesdata = data
padlength = 16 - (len(bytesdata) % 16) # AES CBC padding
bytesdata += padlength * (chr(padlength).encode())
encrdata = cipher.encrypt(bytesdata)
if isinstance(data, str): bytesdata = None # dereference before open operation
with open(filepath + ENCRYPTED_FILE_SUFFIX, "wb") as fh:
fh.write(encrdata)
return True # written successfully
return None
__pluginInfo__ = {
# Do not use _( ) in pluginInfo itself (it is applied later, after loading
'name': 'Security Crypt AES_CBC',
'version': '1.0',
'description': '''AES_CBC security encryption''',
'license': 'Apache-2',
'author': 'Mark V Systems',
'copyright': '(c) Copyright 2018 Mark V Systems Limited, All rights reserved.',
# classes of mount points (required)
'Security.Crypt.Init': securityInit,
'Security.Crypt.Filing.Start': securityFilingStart,
'Security.Crypt.FileSource.Exists': securityFileSourceExists,
'Security.Crypt.FileSource.File': securityFileSourceFile,
'Security.Crypt.Write': securityWrite
}
| [
"fischer@markv.com"
] | fischer@markv.com |
6de2299a08c10405b614b3e10584176fc4c0a16f | 6a194f60c57f00d89467a551696a6d12c6494ca5 | /migrations/versions/b67f74365ac0_updated_the_classes.py | 6906146f3c3e3c17f86d5eff34066880f6b860a3 | [
"MIT"
] | permissive | GeGe-K/Pitcher-App | 3479bc41b0dd431592db87dd9bf94ca59645e2d6 | 4a970b37fe0fcd63ad3853a4f764c410a4acb640 | refs/heads/master | 2020-04-06T22:37:58.522524 | 2018-11-26T08:41:18 | 2018-11-26T08:41:18 | 157,842,038 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,372 | py | """updated the classes.
Revision ID: b67f74365ac0
Revises: 9d0c25ad18b3
Create Date: 2018-11-20 08:24:19.781368
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b67f74365ac0'
down_revision = '9d0c25ad18b3'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('comments', sa.Column('posted', sa.DateTime(), nullable=True))
op.drop_column('comments', 'date')
op.drop_column('comments', 'time')
op.add_column('pitches', sa.Column('posted', sa.DateTime(), nullable=True))
op.drop_column('pitches', 'date')
op.drop_column('pitches', 'time')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('pitches', sa.Column('time', sa.VARCHAR(length=255), autoincrement=False, nullable=True))
op.add_column('pitches', sa.Column('date', sa.VARCHAR(length=255), autoincrement=False, nullable=True))
op.drop_column('pitches', 'posted')
op.add_column('comments', sa.Column('time', sa.VARCHAR(length=255), autoincrement=False, nullable=True))
op.add_column('comments', sa.Column('date', sa.VARCHAR(length=255), autoincrement=False, nullable=True))
op.drop_column('comments', 'posted')
# ### end Alembic commands ###
| [
"gloriagivondo@gmail.com"
] | gloriagivondo@gmail.com |
11a6fe0692f20dd9838d132b112065aff12e988d | 2f30cf20d58e2cde4037441e67213223c69a6998 | /lesson32_接口总结/d2_time_log.py | dc2d95c037ebc1ecda9cf0ab9e2c2fdab64b67c4 | [] | no_license | zengcong1314/python1205 | b11db7de7d0ad1f8401b8b0c9b20024b4405ae6c | da800ed3374d1d43eb75485588ddb8c3a159bb41 | refs/heads/master | 2023-05-25T07:17:25.065004 | 2021-06-08T08:27:54 | 2021-06-08T08:27:54 | 318,685,835 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | # 根据时间生成新文件
import logging
import time
from logging.handlers import TimedRotatingFileHandler
logger = logging.getLogger('python36')
handler = TimedRotatingFileHandler('time12.log',when='s',interval=2,backupCount=100,encoding='UTF-8')
logger.addHandler(handler)
for i in range(100):
logger.warning("生成警告信息{}".format(time.time()))
time.sleep(0.1)
| [
"237886015@qq.com"
] | 237886015@qq.com |
68b702c7709ce4ab311d3fa7fb54a30b2284e31d | bd3fb18aef0bf47eb6410107d939134cffc3a1ae | /0-jakc/jakc_sale/models/procurement.py | 976173c9d8ebca1145d8f77ed6cb1eee4a2fdaf9 | [] | no_license | rapolebas/project-0021 | 477227f7359c893df891c1d98da82d45b6cfcdbe | 4e65ca034be5ff4c7a9c91a988db85ec37392452 | refs/heads/master | 2020-03-16T02:32:32.213774 | 2017-12-02T07:59:15 | 2017-12-02T07:59:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 671 | py | from datetime import datetime, timedelta
from openerp import SUPERUSER_ID
from openerp import api, fields, models, _
import openerp.addons.decimal_precision as dp
from openerp.tools import float_is_zero, float_compare, DEFAULT_SERVER_DATETIME_FORMAT
import logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
class ProcurementOrder(models.Model):
_inherit = ['procurement.order']
sale_order_id = fields.Many2one('sale.order','Sale Order', related='sale_line_id.order_id', readonly=True)
partner_vehicle_id = fields.Many2one('partner.vehicle', related='sale_order_id.partner_vehicle_id', readonly=True, string='Vehicle')
| [
"wahhid@gmail.com"
] | wahhid@gmail.com |
4a80d8be5dbcc1b93e738373fe9b598f7b96a3e3 | ad8de2c69a4d3692af2ce14aaa31ba97de95f24f | /project_code/Example Scripts/plot_oneclass.py | 28048908c1cd94c8c477ae1709fb92532a28fc17 | [
"MIT"
] | permissive | e-koch/Phys-595 | b04f7179879b3f6ff8a6d608be8667b892b874d9 | 44872fa47609242f7aa8671eb75851622516129f | refs/heads/master | 2021-01-25T05:28:09.497201 | 2015-09-10T04:58:26 | 2015-09-10T04:58:26 | 24,083,293 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,557 | py | """
==========================================
One-class SVM with non-linear kernel (RBF)
==========================================
An example using a one-class SVM for novelty detection.
:ref:`One-class SVM <svm_outlier_detection>` is an unsupervised
algorithm that learns a decision function for novelty detection:
classifying new data as similar or different to the training set.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
np.random.seed(1029344)
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
# Generate train data
X = 0.3 * np.random.randn(1000000, 5) # + np.random.uniform(low=-1, high=1, size=(100,2))
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * np.random.randn(20000, 5)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20000, 5))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
n_error_train = y_pred_train[y_pred_train == -1].size
n_error_test = y_pred_test[y_pred_test == -1].size
n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size
# plot the line, the points, and the nearest vectors to the plane
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# plt.title("Novelty Detection")
plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.Blues_r)
a = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='red')
plt.contourf(xx, yy, Z, levels=[0, Z.max()], colors='orange')
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white')
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='green')
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='red')
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([a.collections[0], b1, b2, c],
["learned frontier", "training observations",
"new regular observations", "new abnormal observations"],
loc="upper left",
prop=matplotlib.font_manager.FontProperties(size=11))
# plt.xlabel(
# "error train: %d/200 ; errors novel regular: %d/40 ; "
# "errors novel abnormal: %d/40"
# % (n_error_train, n_error_test, n_error_outliers))
plt.show()
print "error train: %d/200 ; errors novel regular: %d/40 ; "\
"errors novel abnormal: %d/40" \
% (n_error_train, n_error_test, n_error_outliers) | [
"koch.eric.w@gmail.com"
] | koch.eric.w@gmail.com |
a4e6f962ca98d0ce434961e1858aedd883e2937e | dfc827bf144be6edf735a8b59b000d8216e4bb00 | /CODE/experimentcode/DrybedHoriz/stabilisationtermuto0/Run.py | 8e13f5080338655f55c6395ecb2358b0a48709d4 | [] | no_license | jordanpitt3141/ALL | c5f55e2642d4c18b63b4226ddf7c8ca492c8163c | 3f35c9d8e422e9088fe096a267efda2031ba0123 | refs/heads/master | 2020-07-12T16:26:59.684440 | 2019-05-08T04:12:26 | 2019-05-08T04:12:26 | 94,275,573 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 14,880 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 5 14:24:26 2017
@author: jp
"""
from Serre2dc import *
from scipy import *
from pylab import plot, show, legend,xlim,ylim,savefig,title,xlabel,ylabel,clf, loglog
import csv
import os
from numpy.linalg import norm,solve
from time import time
from scipy.interpolate import interp1d
from scipy import signal
from scipy import sqrt
from numpy.fft import fft
def DrybedANA(h1,x,t,g):
n = len(x)
u = zeros(n)
h = zeros(n)
G = zeros(n)
for i in range(n):
if(x[i] >= -t*sqrt(g*h1) and x[i] <= 2*t*sqrt(g*h1) ):
u[i] = 2.0 / 3.0 *(sqrt(g*h1) + x[i] / t)
h[i] = 4.0 / (9.0 * g) *(sqrt(g*h1) - 0.5*x[i] / t)**2
ux = 2.0 / 3.0 *(1.0 / t)
uxx = 0
hx = 2.0 / (9.0 * g * t*t) *(x[i] - 2*t*sqrt(g*h1))
G[i] = u[i]*h[i] - h[i]*h[i]*hx*ux
elif(x[i] < -t*sqrt(g*h1)):
h[i] = h1
return h,u, G
def copyarraytoC(a):
n = len(a)
b = mallocPy(n)
for i in range(n):
writetomem(b,i,a[i])
return b
def copyarrayfromC(a,n):
b = [0]*n
for i in range(n):
b[i] = readfrommem(a,i)
return b
def copywritearraytoC(a,b):
n = len(a)
for i in range(n):
writetomem(b,i,a[i])
#FD solution
#gives exact up to linears, so is second order accurate huzzah
def getGfromupy(h,u,bed,u0,u1,h0,h1,b0,b1,dx):
idx = 1.0 / dx
ithree = 1.0 / 3.0
n = len(h)
G = zeros(n)
for i in range(1,n-1):
th = h[i]
thx = 0.5*idx*(h[i+1] - h[i-1])
tbx = 0.5*idx*(bed[i+1] - bed[i-1])
tbxx = idx*idx*(bed[i+1] -2*bed[i] + bed[i-1])
D = th + th*thx*tbx + 0.5*th*th*tbxx + th*tbx*tbx
ai = -ithree*idx*idx*th*th*th + 0.5*idx*th*th*thx
bi = D + 2.0*ithree*idx*idx*th*th*th
ci = -ithree*idx*idx*th*th*th - 0.5*idx*th*th*thx
G[i] = ai*u[i-1] + bi*u[i] + ci*u[i+1]
#boundary
#i=0
i=0
th = h[i]
thx = 0.5*idx*(h[i+1] - h0)
tbx = 0.5*idx*(bed[i+1] - b0)
tbxx = idx*idx*(bed[i+1] -2*bed[i] + b0)
D = th + th*thx*tbx + 0.5*th*th*tbxx + th*tbx*tbx
ai = -ithree*idx*idx*th*th*th + 0.5*idx*th*th*thx
bi = D + 2.0*ithree*idx*idx*th*th*th
ci = -ithree*idx*idx*th*th*th - 0.5*idx*th*th*thx
G[i] = ai*u0 + bi*u[i] + ci*u[i+1]
#i = n-1
i = n-1
th = h[i]
thx = 0.5*idx*(h1 - h[i-1])
tbx = 0.5*idx*(b1 - bed[i-1])
tbxx = idx*idx*(b1 -2*bed[i] + bed[i-1])
D = th + th*thx*tbx + 0.5*th*th*tbxx + th*tbx*tbx
ai = -ithree*idx*idx*th*th*th + 0.5*idx*th*th*thx
bi = D + 2.0*ithree*idx*idx*th*th*th
ci = -ithree*idx*idx*th*th*th - 0.5*idx*th*th*thx
G[i] = ai*u[i-1] + bi*u[i] + ci*u1
return G
def MollifyFunc(C,x):
if(abs(x) <1):
return C*exp(1.0/(abs(x)**2 - 1))
else:
return 0
def Dambreak(h0,h1,x0,x):
n = len(x)
h = zeros(n)
u = zeros(n)
G = zeros(n)
b = zeros(n)
for i in range(n):
if(x[i] > x0):
h[i] = h0
else:
h[i] = h1
return h,u,G,b
def DambreakS(h0,h1,x0,x,diffuse):
n = len(x)
h = zeros(n)
u = zeros(n)
G = zeros(n)
b = zeros(n)
for i in range(n):
h[i] = h0 + 0.5*(h1 - h0)*(1 + tanh(diffuse*(x0 - x[i])))
return h,u,G,b
def DamNreakDRYANA(h1,x,t,g):
n = len(x)
bed = zeros(n)
h, u,G = DrybedANA(h1,x,t,g)
G1 = getGfromupy(h,u,bed,0,0,h[0],h[-1],bed[0],bed[-1],dx)
return h,u,G,G1
def solitoninitGana(a0,a1,g,x,t0,bot,dx):
n = len(x)
h = zeros(n)
G = zeros(n)
bx = zeros(n)
u = zeros(n)
ux = zeros(n)
c = sqrt(g*(a0 + a1))
k = sqrt(3.0*a1) / (2.0*a0 *sqrt(a0 + a1))
i3 = 1.0/ 3.0
for i in range(n):
phi = x[i] - c*t0;
sechkphi = (2./(exp(k*phi) + exp(-k*phi)))
tanhkphi = sechkphi*((exp(k*phi) - exp(-k*phi))/2.0)
hdx = -2*a1*k*tanhkphi*sechkphi*sechkphi
hdxx = a1*(4*k*k*tanhkphi*tanhkphi*sechkphi*sechkphi - 2*k*k*sechkphi*sechkphi*sechkphi*sechkphi)
bx[i] = bot
h[i] = a0 + a1*sechkphi*sechkphi
u[i] = c* ((h[i] - a0) / h[i])
ux[i] = (a0*c*hdx/(h[i]*h[i]))
G[i] = u[i]*h[i] - i3*h[i]*h[i]*h[i]*(a0*c*(h[i]*hdxx - 2*hdx*hdx)/(h[i]*h[i]*h[i])) - h[i]*h[i]*hdx*(a0*c*hdx/(h[i]*h[i]))
return h,u,G,bx,ux
"""
#Solver
#So our solver solves the analytic soliton problem with second order accuracy in h, u and G.
wdir = "../../../../../data/raw/DryTest/Solver/Soliton/theta1/"
if not os.path.exists(wdir):
os.makedirs(wdir)
s = wdir + "norms.txt"
with open(s,'a') as file2:
writefile2 = csv.writer(file2, delimiter = ',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
writefile2.writerow(['dx','theta','hnorm', 'Gnorm', 'unorm', 'Rhnorm', 'RGnorm', 'dunorm'])
for j in range(14):
a0 = 1
a1 = 0.7
g = 9.81
t0 = 0
bot = 0
dx = 1.0 / 2**j
l = 0.5 / sqrt(g*(a0 + a1))
dt = l*dx
startx = -20
endx = 20 + 0.9*dx
startt = 0.0
endt = 1 + dt
szoomx = startx
ezoomx = endx
t0 = 0
#x,t = makevar(startx,endx +0.1*dx,dx,startt,endt,dt)
x = arange(startx,endx +0.1*dx, dx)
xG = concatenate((array([x[0] - dx]),x,array([x[-1] + dx])))
ts = []
n = len(x)
theta = 1
gap = int(1.0/dt)
nBC = 2
GhnBC = 3
unBC = 3
nGhhbc = 3*n + 2*(GhnBC)
nubc =2*n -1 + 2*unBC
idx = 1.0 / dx
h,u,G,bx,ux = solitoninitGana(a0,a1,g,x,t0,bot,dx)
hMbeg = h[0]*ones(GhnBC)
GMbeg = G[0]*ones(GhnBC)
hMend = h[-1]*ones(GhnBC)
GMend = G[-1]*ones(GhnBC)
uMbeg = u[0]*ones(unBC)
uMend = u[-1]*ones(unBC)
h_c = copyarraytoC(h)
G_c = copyarraytoC(G)
x_c = copyarraytoC(x)
u_c = mallocPy(n)
hMbeg_c = copyarraytoC(hMbeg)
hMend_c = copyarraytoC(hMend)
GMbeg_c = copyarraytoC(GMbeg)
GMend_c = copyarraytoC(GMend)
uMbeg_c = copyarraytoC(uMbeg)
uMend_c = copyarraytoC(uMend)
ubc_c = mallocPy(nubc)
hhbc_c = mallocPy(nGhhbc)
Ghbc_c = mallocPy(nGhhbc)
hp_c = mallocPy(n)
Gp_c = mallocPy(n)
hpp_c = mallocPy(n)
Gpp_c = mallocPy(n)
ct = 0
while ct < endt:
evolvewrapperconsistenttime(G_c, h_c,hMbeg_c , hMend_c,GMbeg_c ,GMend_c,uMbeg_c,uMend_c,g,dx, dt,n,GhnBC,unBC,nGhhbc,nubc,theta, hhbc_c,Ghbc_c,ubc_c,Gp_c,hp_c, Gpp_c,hpp_c)
ct = ct + dt
print(ct)
hC = copyarrayfromC(h_c,n)
GC = copyarrayfromC(G_c,n)
getufromG(h_c, G_c,hMbeg_c,hMend_c,GMbeg_c,GMend_c,uMbeg_c,uMend_c,theta,dx,n,2*n +1,GhnBC,unBC,nGhhbc,nubc,ubc_c,hhbc_c,Ghbc_c)
ubcC = copyarrayfromC(ubc_c,nubc)
uCti = ubcC[unBC:-unBC:2]
hhbcC = copyarrayfromC(hhbc_c,nGhhbc)
GhbcC = copyarrayfromC(Ghbc_c,nGhhbc)
hA,uA,GA,bx_ta,ux_ta = solitoninitGana(a0,a1,g,x,t0 + ct,bot,dx)
unorm = norm(uA - uCti,ord =1) / norm(uA,ord=1)
hnorm = norm(hA - hC,ord =1) / norm(hA,ord=1)
Gnorm = norm(GA - GC,ord =1) / norm(GA,ord=1)
s = wdir + "h.dat"
with open(s,'a') as file1:
s ="%3.8f%5s%1.15f\n" %(dx," ",hnorm)
file1.write(s)
s = wdir + "G.dat"
with open(s,'a') as file1:
s ="%3.8f%5s%1.15f\n" %(dx," ",Gnorm)
file1.write(s)
s = wdir + "u.dat"
with open(s,'a') as file1:
s ="%3.8f%5s%1.15f\n" %(dx," ",unorm)
file1.write(s)
"""
## This FEM reconstructs the soliton problem (with analytic G) with second order accuracy for h (G) at centres and edges and u and du at the edges
"""
wdir = "../../../../../data/raw/DryTest/FEM/Soliton/theta1/"
if not os.path.exists(wdir):
os.makedirs(wdir)
s = wdir + "norms.txt"
with open(s,'a') as file2:
writefile2 = csv.writer(file2, delimiter = ',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
writefile2.writerow(['dx','theta','hnorm', 'Gnorm', 'unorm', 'Rhnorm', 'RGnorm', 'dunorm'])
for j in range(15):
a0 = 1
a1 = 0.7
g = 9.81
t0 = 0
bot = 0
dx = 1.0 / 2**j
l = 0.5 / sqrt(g*(a0 + a1))
dt = l*dx
startx = -200
endx = 200 + 0.9*dx
startt = 0.0
endt = 50
szoomx = startx
ezoomx = endx
t0 = 0
#x,t = makevar(startx,endx +0.1*dx,dx,startt,endt,dt)
x = arange(startx,endx +0.1*dx, dx)
xG = concatenate((array([x[0] - dx]),x,array([x[-1] + dx])))
ts = []
n = len(x)
theta = 1
gap = int(1.0/dt)
nBC = 2
GhnBC = 3
unBC = 3
nGhhbc = 3*n + 2*(GhnBC)
nubc =2*n -1 + 2*unBC
idx = 1.0 / dx
h,u,G,bx,ux = solitoninitGana(a0,a1,g,x,t0,bot,dx)
hMbeg = h[0]*ones(GhnBC)
GMbeg = G[0]*ones(GhnBC)
hMend = h[-1]*ones(GhnBC)
GMend = G[-1]*ones(GhnBC)
uMbeg = u[0]*ones(unBC)
uMend = u[-1]*ones(unBC)
h_c = copyarraytoC(h)
G_c = copyarraytoC(G)
x_c = copyarraytoC(x)
u_c = mallocPy(n)
hMbeg_c = copyarraytoC(hMbeg)
hMend_c = copyarraytoC(hMend)
GMbeg_c = copyarraytoC(GMbeg)
GMend_c = copyarraytoC(GMend)
uMbeg_c = copyarraytoC(uMbeg)
uMend_c = copyarraytoC(uMend)
ubc_c = mallocPy(nubc)
hhbc_c = mallocPy(nGhhbc)
Ghbc_c = mallocPy(nGhhbc)
getufromG(h_c, G_c,hMbeg_c,hMend_c,GMbeg_c,GMend_c,uMbeg_c,uMend_c,theta,dx,n,2*n +1,GhnBC,unBC,nGhhbc,nubc,ubc_c,hhbc_c,Ghbc_c)
hC = copyarrayfromC(h_c,n)
GC = copyarrayfromC(G_c,n)
ubcC = copyarrayfromC(ubc_c,nubc)
uCti = ubcC[unBC:-unBC:2]
hhbcC = copyarrayfromC(hhbc_c,nGhhbc)
GhbcC = copyarrayfromC(Ghbc_c,nGhhbc)
#Calculate u gradients
du = []
xdu = []
for i in range(n):
uai =2*idx*idx*(ubcC[2*i + unBC - 1] - 2*ubcC[2*i + unBC] + ubcC[2*i + unBC + 1])
ubi =idx*(-ubcC[2*i + unBC - 1]+ ubcC[2*i + unBC + 1])
duiph = uai*(dx) + ubi;
duimh = -uai*(dx) + ubi;
du.append(duimh)
du.append(duiph)
xdu.append(x[i] - 0.5*dx)
xdu.append(x[i] + 0.5*dx)
hh,hu,hG,hbx,hux = solitoninitGana(a0,a1,g,xdu,t0,bot,dx)
xhbc = []
xubc = []
for i in range(len(xG)):
if(i ==0):
xubc.append(xG[i] - 0.5*dx)
xubc.append(xG[i])
xubc.append(xG[i] + 0.5*dx)
else:
xubc.append(xG[i])
xubc.append(xG[i] + 0.5*dx)
xhbc.append(xG[i] - 0.5*dx)
xhbc.append(xG[i])
xhbc.append(xG[i] + 0.5*dx)
xubc = array(xubc)
xhbc = array(xhbc)
Rh,Ru,RG,Rbx,Rux = solitoninitGana(a0,a1,g,xhbc,t0,bot,dx)
unorm = norm(u - uCti,ord =1) / norm(u,ord=1)
hnorm = norm(h - hC,ord =1) / norm(h,ord=1)
Gnorm = norm(G - GC,ord =1) / norm(G,ord=1)
# derivatives and reconstructions
rhnorm = norm(Rh - hhbcC,ord =1) / norm(Rh,ord=1)
rGnorm = norm(RG - GhbcC,ord =1) / norm(RG,ord=1)
dunorm = norm(hux - du,ord =1) / norm(hux,ord=1)
s = wdir + "norms.txt"
with open(s,'a') as file2:
writefile2 = csv.writer(file2, delimiter = ',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
writefile2.writerow([str(dx),str(theta),str(hnorm), str(Gnorm), str(unorm),str(rhnorm),str(rGnorm), str(dunorm)])
s = wdir + "h.dat"
with open(s,'a') as file1:
s ="%3.8f%5s%1.15f\n" %(dx," ",hnorm)
file1.write(s)
s = wdir + "G.dat"
with open(s,'a') as file1:
s ="%3.8f%5s%1.15f\n" %(dx," ",Gnorm)
file1.write(s)
s = wdir + "u.dat"
with open(s,'a') as file1:
s ="%3.8f%5s%1.15f\n" %(dx," ",unorm)
file1.write(s)
s = wdir + "rh.dat"
with open(s,'a') as file1:
s ="%3.8f%5s%1.15f\n" %(dx," ",rhnorm)
file1.write(s)
s = wdir + "rG.dat"
with open(s,'a') as file1:
s ="%3.8f%5s%1.15f\n" %(dx," ",rGnorm)
file1.write(s)
s = wdir + "h.dat"
with open(s,'a') as file1:
s ="%3.8f%5s%1.15f\n" %(dx," ",hnorm)
file1.write(s)
s = wdir + "du.dat"
with open(s,'a') as file1:
s ="%3.8f%5s%1.15f\n" %(dx," ",dunorm)
file1.write(s)
"""
#Dry bed test
h0 = 0.0
h1 =0.228
x0 = 0
g = 9.81
t0 = 0
dx = 0.01
l = 0.5 / sqrt(g*(h1 + h0))
dt = l*dx
startx = -50
endx = 50 + 0.9*dx
startt = t0
endt = 5+ t0
szoomx = startx
ezoomx = endx
#x,t = makevar(startx,endx +0.1*dx,dx,startt,endt,dt)
x = arange(startx,endx +0.1*dx, dx)
xG = concatenate((array([x[0] - dx]),x,array([x[-1] + dx])))
ts = []
n = len(x)
theta = 1
gap = int(1.0/dt)
nBC = 2
GhnBC = 3
unBC = 3
nGhhbc = 3*n + 2*(GhnBC)
nubc =2*n -1 + 2*unBC
idx = 1.0 / dx
#FEM handles dry dam-break with 0 height and 0 velocity well
#h,u,G,b = Dambreak(h0,h1,x0,x)
#h,u,G,b = Dambreak(h0,h1,x0,x)
h,u,G,G1 =DamNreakDRYANA(h1,x,t0,g)
hMbeg = h[0]*ones(GhnBC)
GMbeg = G[0]*ones(GhnBC)
hMend = h[-1]*ones(GhnBC)
GMend = G[-1]*ones(GhnBC)
uMbeg = u[0]*ones(unBC)
uMend = u[-1]*ones(unBC)
h_c = copyarraytoC(h)
G_c = copyarraytoC(G)
x_c = copyarraytoC(x)
u_c = mallocPy(n)
hMbeg_c = copyarraytoC(hMbeg)
hMend_c = copyarraytoC(hMend)
GMbeg_c = copyarraytoC(GMbeg)
GMend_c = copyarraytoC(GMend)
uMbeg_c = copyarraytoC(uMbeg)
uMend_c = copyarraytoC(uMend)
ubc_c = mallocPy(nubc)
hhbc_c = mallocPy(nGhhbc)
Ghbc_c = mallocPy(nGhhbc)
hp_c = mallocPy(n)
Gp_c = mallocPy(n)
hpp_c = mallocPy(n)
Gpp_c = mallocPy(n)
ct = startt
while ct < endt:
#evolvewrapperconsistenttime(G_c, h_c,hMbeg_c , hMend_c,GMbeg_c ,GMend_c,uMbeg_c,uMend_c,g,dx, dt,n,GhnBC,unBC,nGhhbc,nubc,theta, hhbc_c,Ghbc_c,ubc_c,Gp_c,hp_c, Gpp_c,hpp_c)
dt = evolvewrapperADAP(G_c, h_c,hMbeg_c , hMend_c,GMbeg_c ,GMend_c,uMbeg_c,uMend_c,g,dx, dt,n,GhnBC,unBC,nGhhbc,nubc,theta, hhbc_c,Ghbc_c,ubc_c,Gp_c,hp_c, Gpp_c,hpp_c)
ct = ct + dt
if(dt < 10**-8):
break
print(ct)
hC = copyarrayfromC(h_c,n)
GC = copyarrayfromC(G_c,n)
hF,uF,GF,G1F =DamNreakDRYANA(h1,x,ct,g)
getufromG(h_c, G_c,hMbeg_c,hMend_c,GMbeg_c,GMend_c,uMbeg_c,uMend_c,theta,dx,n,2*n +1,GhnBC,unBC,nGhhbc,nubc,ubc_c,hhbc_c,Ghbc_c)
ubcC = copyarrayfromC(ubc_c,nubc)
ufC = ubcC[unBC:-unBC:2]
deallocPy(h_c)
deallocPy(G_c)
deallocPy(hp_c)
deallocPy(Gp_c)
deallocPy(hpp_c)
deallocPy(Gpp_c)
deallocPy(u_c)
deallocPy(ubc_c)
deallocPy(hhbc_c)
deallocPy(Ghbc_c)
deallocPy(hMbeg_c)
deallocPy(GMbeg_c)
deallocPy(uMbeg_c)
deallocPy(hMend_c)
deallocPy(GMend_c)
deallocPy(uMend_c)
| [
"jordanpitt3141@github.com"
] | jordanpitt3141@github.com |
f3e82229dd7ad3dce9fa4f95ba275f4f42e1397d | dbf635c24ed9eff228ffaf35e71dcfd3712891a5 | /acoustic/COVAREP/sentence_level_format/archived_models/archived_model_4/load_model.py | a663a69b3fee68df98be40a215253ee114384130 | [] | no_license | aascode/depression_estimation_from_individual_modalities | 57f3b6ebf740585c9cb3d5821028969e2f36e4d1 | 6e1563b4081c4aadc91f7110c684290b7a622167 | refs/heads/master | 2022-01-14T20:41:33.333739 | 2019-05-06T18:30:11 | 2019-05-06T18:30:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 836 | py | import numpy as np
import keras
from keras.models import Model, Sequential, load_model
from keras.layers import Dense, CuDNNLSTM, Input, Concatenate, Dropout
from keras import regularizers
def load_model(location = None):
if(location != None):
model = keras.models.load_model(location)
print("Loaded the model.")
return model
X = Input(shape = (4000, 74,))
X_gender = Input(shape = (1,))
Y = CuDNNLSTM(84, name = 'lstm_cell')(X)
Y = Dropout(rate = 0.2)(Y)
Y = Concatenate(axis = -1)([Y, X_gender])
Y = Dense(42, activation = 'relu')(Y)
Y = Dropout(rate = 0.2)(Y)
Y = Dense(20, activation = 'relu')(Y)
Y = Dropout(rate = 0.2)(Y)
Y = Dense(1, activation = None)(Y)
model = Model(inputs = [X, X_gender], outputs = Y)
print("Created a new model.")
return model
if(__name__ == "__main__"):
m = load_model() | [
"arbaaz.qureshi29@gmail.com"
] | arbaaz.qureshi29@gmail.com |
340d1b477b1dd67a4c8461aabf6a05268df3064b | 3c358b34811ad9d178e2865336498dde3f3e5032 | /WAFLEX/server/mymail.py | 8db59a99094a40f33d3899b37367ca1491db7699 | [] | no_license | shywj05/WAFLEX-MiddelProject | b255796839c889a16c4900a87f2e5adcd1337a44 | ca8db1e368104f75218a8da9a0f987349d27f755 | refs/heads/master | 2023-06-30T23:50:22.367106 | 2021-08-03T01:15:11 | 2021-08-03T01:15:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,612 | py | import smtplib
from email.mime.text import MIMEText
import string
import random
class MyMail:
def mysendmail(self, recvEmail, title):
smtpName = "smtp.gmail.com" #smtp 서버 주소
smtpPort = 587 #smtp 포트 번호
sendEmail = "sysojxx@gmail.com"
password = "Qwe123!@#"
_LENGTH = 8 # 8자리
# 숫자 + 대소문자 + 특수문자
alpha_s = string.ascii_lowercase
alpha_b = string.ascii_uppercase
digit = string.digits
temp = ['~','!','@','#','$','%','^','*']
# 랜덤한 문자열 생성
result = alpha_s[random.randrange(0, 26)] + alpha_s[random.randrange(0, 26)]
result += alpha_b[random.randrange(0, 26)] + alpha_b[random.randrange(0, 26)]
result += digit[random.randrange(0, 10)] + digit[random.randrange(0, 10)]
result += temp[random.randrange(len(temp))] + temp[random.randrange(len(temp))]
text = "인증하실 번호는 " +result+" 입니다."
msg = MIMEText(text) #MIMEText(text , _charset = "utf8")
msg['Subject'] = title
msg['From'] = sendEmail
msg['To'] = recvEmail
print(msg.as_string())
s=smtplib.SMTP( smtpName , smtpPort ) #메일 서버 연결
s.starttls() #TLS 보안 처리
s.login( sendEmail , password ) #로그인
s.sendmail( sendEmail, recvEmail, msg.as_string() ) #메일 전송, 문자열로 변환하여 보냅니다.
s.close() #smtp 서버 연결을 종료합니다.
return result
| [
"shywj05@gmail.com"
] | shywj05@gmail.com |
d24d26e9d5ed8d25813644ad2f4e81cb5ebce786 | 82fe367292a7f02a3e0285cf4eb82c64dc1320b3 | /fuchsia/test/common.py | 448b1dfc1d30465375b2ed7fcb49bc16ab58b3af | [] | no_license | denoland/chromium_build | 6644e800214cc080ab38eeaa77bace280c92bc75 | 3d4b0c1e773d659da18710fc4984b8195f6d5aea | refs/heads/upstream | 2023-05-15T07:02:07.545465 | 2022-12-09T21:15:30 | 2022-12-09T21:37:17 | 159,756,845 | 36 | 18 | null | 2023-01-13T15:56:40 | 2018-11-30T02:26:43 | Python | UTF-8 | Python | false | false | 9,905 | py | # Copyright 2022 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Common methods and variables used by Cr-Fuchsia testing infrastructure."""
import json
import logging
import os
import re
import subprocess
import time
from argparse import ArgumentParser
from typing import Iterable, List, Optional
from compatible_utils import get_ssh_prefix, get_host_arch
# Root of the checkout: three directory levels above this file's directory.
DIR_SRC_ROOT = os.path.abspath(
    os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir))

# Package repository alias used when resolving packages on the device.
REPO_ALIAS = 'fuchsia.com'

# Locations of the Fuchsia SDK and its host tools within the checkout.
SDK_ROOT = os.path.join(DIR_SRC_ROOT, 'third_party', 'fuchsia-sdk', 'sdk')
SDK_TOOLS_DIR = os.path.join(SDK_ROOT, 'tools', get_host_arch())

# Path to the `ffx` binary bundled with the SDK host tools.
_FFX_TOOL = os.path.join(SDK_TOOLS_DIR, 'ffx')

# This global variable is used to set the environment variable
# |FFX_ISOLATE_DIR| when running ffx commands in E2E testing scripts.
# Set via set_ffx_isolate_dir(); None means no isolation dir override.
_FFX_ISOLATE_DIR = None

# Packages that still run as CFv1 components.
# TODO(crbug.com/1280705): Remove each entry when they are migrated to v2.
_V1_PACKAGE_LIST = [
    'chrome_v1',
    'web_engine',
    'web_engine_with_webui',
    'web_runner',
]
def set_ffx_isolate_dir(isolate_dir: str) -> None:
    """Overwrites |_FFX_ISOLATE_DIR|.

    Args:
        isolate_dir: Directory to use as the ffx isolation directory for
            subsequent ffx invocations.
    """
    # Module-level state is intentional here; callers configure it once.
    # pylint: disable=global-statement
    global _FFX_ISOLATE_DIR
    _FFX_ISOLATE_DIR = isolate_dir
def _get_daemon_status():
    """Determines daemon status via `ffx daemon socket`.

    Returns:
        dict of status of the socket. Status will have a key Running or
        NotRunning to indicate if the daemon is running.
    """
    result = run_ffx_command(['--machine', 'json', 'daemon', 'socket'],
                             check=True,
                             capture_output=True,
                             suppress_repair=True)
    socket_info = json.loads(result.stdout.strip())
    pid_info = socket_info.get('pid', {})
    # Fall back to an explicit NotRunning status when no status is reported.
    return pid_info.get('status', {'NotRunning': True})
def _is_daemon_running():
    """Returns True if the ffx daemon status reports it as running."""
    status = _get_daemon_status()
    return 'Running' in status
def check_ssh_config_file() -> None:
    """Checks for ssh keys and generates them if they are missing."""
    helper_script = os.path.join(SDK_ROOT, 'bin', 'fuchsia-common.sh')
    # Source the SDK helper script and invoke its key-checking function.
    subprocess.run(
        ['bash', '-c', f'. {helper_script}; check-fuchsia-ssh-config'],
        check=True)
def _wait_for_daemon(start=True, timeout_seconds=100):
    """Waits for daemon to reach desired state in a polling loop.

    Sleeps for 5s between polls.

    Args:
        start: bool. Indicates to wait for daemon to start up. If False,
            indicates waiting for daemon to die.
        timeout_seconds: int. Number of seconds to wait for the daemon to
            reach the desired status.

    Raises:
        TimeoutError: if the daemon does not reach the desired state in time.
    """
    wanted_status = 'start' if start else 'stop'
    sleep_period_seconds = 5
    attempts = int(timeout_seconds / sleep_period_seconds)
    for i in range(attempts):
        if _is_daemon_running() == start:
            return
        # Skip the log and the sleep after the final failed poll. The
        # original comparison `i != attempts` was always true because
        # range(attempts) never yields `attempts` itself, which caused a
        # pointless 5s sleep right before the timeout was raised.
        if i != attempts - 1:
            logging.info('Waiting for daemon to %s...', wanted_status)
            time.sleep(sleep_period_seconds)
    raise TimeoutError(f'Daemon did not {wanted_status} in time.')
def _run_repair_command(output):
    """Scans |output| for a self-repair command to run and, if found, runs it.

    ffx failures often embed a suggested fix, e.g.
    "Run `ffx doctor --restart-daemon` for further diagnostics."

    Returns:
        True if a repair command was found and ran successfully. False
        otherwise.
    """
    match = re.search('`ffx ([^`]+)`', output)
    if match is None or len(match.groups()) != 1:
        # No repair command embedded in the output.
        return False
    repair_args = match.group(1).split()
    try:
        run_ffx_command(repair_args, suppress_repair=True)
        # The daemon must be back up before the caller retries its command.
        _wait_for_daemon(start=True)
    except subprocess.CalledProcessError:
        return False  # Repair failed.
    return True  # Repair succeeded.
def run_ffx_command(cmd: Iterable[str],
                    target_id: Optional[str] = None,
                    check: bool = True,
                    suppress_repair: bool = False,
                    configs: Optional[List[str]] = None,
                    **kwargs) -> subprocess.CompletedProcess:
    """Runs `ffx` with the given arguments, waiting for it to exit.

    If `ffx` exits with a non-zero exit code, the output is scanned for a
    recommended repair command (e.g., "Run `ffx doctor --restart-daemon` for
    further diagnostics."). If such a command is found, it is run and then the
    original command is retried. This behavior can be suppressed via the
    `suppress_repair` argument.

    Args:
        cmd: A sequence of arguments to ffx.
        target_id: Whether to execute the command for a specific target. The
            target_id could be in the form of a nodename or an address.
        check: If True, CalledProcessError is raised if ffx returns a non-zero
            exit code.
        suppress_repair: If True, do not attempt to find and run a repair
            command.
        configs: A list of configs to be applied to the current command.

    Returns:
        A CompletedProcess instance

    Raises:
        CalledProcessError if |check| is true.
    """
    ffx_cmd = [_FFX_TOOL]
    if target_id:
        ffx_cmd.extend(('--target', target_id))
    if configs:
        for config in configs:
            ffx_cmd.extend(('--config', config))
    ffx_cmd.extend(cmd)
    # Use a copy of the environment so setting FFX_ISOLATE_DIR does not
    # leak into this process's os.environ (the previous implementation
    # mutated os.environ directly).
    env = os.environ.copy()
    if _FFX_ISOLATE_DIR:
        env['FFX_ISOLATE_DIR'] = _FFX_ISOLATE_DIR
    try:
        if not suppress_repair:
            # If we want to repair, we need to capture output in STDOUT and
            # STDERR. This could conflict with expectations of the caller.
            output_captured = kwargs.get('capture_output') or (
                kwargs.get('stdout') and kwargs.get('stderr'))
            if not output_captured:
                # Force output to combine into STDOUT.
                kwargs['stdout'] = subprocess.PIPE
                kwargs['stderr'] = subprocess.STDOUT
        return subprocess.run(ffx_cmd,
                              check=check,
                              encoding='utf-8',
                              env=env,
                              **kwargs)
    except subprocess.CalledProcessError as cpe:
        if suppress_repair or (cpe.output
                               and not _run_repair_command(cpe.output)):
            raise
        # If the original command failed but a repair command was found and
        # succeeded, try one more time with the original command. |configs|
        # is forwarded as well (the previous implementation dropped it on
        # retry); repair is suppressed to avoid an infinite retry loop.
        return run_ffx_command(cmd, target_id, check, True, configs, **kwargs)
def run_continuous_ffx_command(cmd: Iterable[str],
                               target_id: Optional[str] = None,
                               encoding: Optional[str] = 'utf-8',
                               **kwargs) -> subprocess.Popen:
    """Starts an ffx command without waiting for it to finish.

    Returns:
        The subprocess.Popen handle of the launched command.
    """
    full_cmd = [_FFX_TOOL]
    if target_id:
        full_cmd.extend(('--target', target_id))
    full_cmd.extend(cmd)
    return subprocess.Popen(full_cmd, encoding=encoding, **kwargs)
def read_package_paths(out_dir: str, pkg_name: str) -> List[str]:
    """Reads the build-generated package metadata for |pkg_name|.

    Args:
        out_dir: Build output directory, relative to the source root.
        pkg_name: Name of the package whose `.meta` file is read.

    Returns:
        A list of the absolute path to all FAR files the package depends on.
    """
    meta_path = os.path.join(DIR_SRC_ROOT, out_dir, 'gen', 'package_metadata',
                             f'{pkg_name}.meta')
    # Specify the encoding explicitly so parsing does not depend on the
    # host locale.
    with open(meta_path, encoding='utf-8') as meta_file:
        data = json.load(meta_file)
    return [
        os.path.join(DIR_SRC_ROOT, out_dir, package)
        for package in data['packages']
    ]
def register_common_args(parser: ArgumentParser) -> None:
    """Adds the arguments shared by all test scripts to |parser|."""
    group = parser.add_argument_group('common', 'common arguments')
    group.add_argument(
        '--out-dir',
        '-C',
        help='Path to the directory in which build files are located. ',
        type=os.path.realpath)
def register_device_args(parser: ArgumentParser) -> None:
    """Adds target-device selection arguments to |parser|."""
    help_text = ('Specify the target device. This could be '
                 'a node-name (e.g. fuchsia-emulator) or an '
                 'an ip address along with an optional port '
                 '(e.g. [fe80::e1c4:fd22:5ee5:878e]:22222, '
                 '1.2.3.4, 1.2.3.4:33333). If unspecified, '
                 'the default target in ffx will be used.')
    group = parser.add_argument_group('device', 'device arguments')
    group.add_argument('--target-id',
                       default=os.environ.get('FUCHSIA_NODENAME'),
                       help=help_text)
def register_log_args(parser: ArgumentParser) -> None:
    """Adds logging-related arguments to |parser|."""
    group = parser.add_argument_group('logging', 'logging arguments')
    group.add_argument('--logs-dir',
                       help=('Directory to write logs to.'),
                       type=os.path.realpath)
def get_component_uri(package: str) -> str:
    """Builds the fuchsia-pkg component URI for |package|."""
    return 'fuchsia-pkg://{0}/{1}#meta/{1}.cm'.format(REPO_ALIAS, package)
def resolve_packages(packages: List[str], target_id: Optional[str]) -> None:
    """Ensure that all |packages| are installed on a device.

    Args:
        packages: Names of the packages to resolve on the device.
        target_id: The target device; None lets ffx pick its default target.

    Raises:
        subprocess.CalledProcessError: if any resolve command fails.
    """
    # The ssh prefix depends only on the target, so compute it once instead
    # of spawning an `ffx target get-ssh-address` subprocess per package as
    # the previous implementation did.
    ssh_prefix = get_ssh_prefix(get_ssh_address(target_id))
    for package in packages:
        resolve_cmd = [
            '--', 'pkgctl', 'resolve',
            'fuchsia-pkg://%s/%s' % (REPO_ALIAS, package)
        ]
        subprocess.run(ssh_prefix + resolve_cmd, check=True)
def get_ssh_address(target_id: Optional[str]) -> str:
    """Determines SSH address for given target."""
    result = run_ffx_command(('target', 'get-ssh-address'),
                             target_id,
                             capture_output=True)
    return result.stdout.strip()
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
307cd74f01bac3f5f7372ab4a7ad3205c7496789 | 69ff10443014ac749d6e8e5eb8dd65f0ae995e0c | /install/opt/etc/gunicorn/sigproxy_config.py | 4764190e9bef0dfc137545068bab743d1f6bf0fd | [] | no_license | identinetics/d-PVZDweb | 1443051a961c2ffff3cef8fc5326a651d4783443 | 1e269546a505eed121e32cf22b7f2227e6473c95 | refs/heads/master | 2021-07-13T01:46:34.025748 | 2019-05-09T07:20:18 | 2019-05-09T07:22:06 | 160,792,534 | 0 | 1 | null | 2020-06-08T13:27:09 | 2018-12-07T08:10:35 | Shell | UTF-8 | Python | false | false | 786 | py | import os
from seclay_xmlsig_proxy_config import SigProxyConfig as Cfg
# Parameter description: see https://github.com/benoitc/gunicorn/blob/master/examples/example_config.py
# Address the server listens on, taken from the proxy's own config object.
bind = Cfg.host + ':' + str(Cfg.port)
access_log_format = '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"'
accesslog = '/var/log/sigproxy/access.log'
errorlog = '/var/log/sigproxy/error.log'
loglevel = 'info'
pidfile = '/var/run/sigproxy/gunicorn.pid'
# Maximum number of pending (not yet accepted) connections.
backlog = 64
# A single synchronous worker: requests are handled strictly one at a time.
# NOTE(review): the commented-out DEBUG line below suggests this was chosen
# deliberately -- confirm before raising it.
workers = 1
worker_class = 'sync'
worker_connections = 1000
timeout = 30
keepalive = 2
# spew = True would make gunicorn trace every executed line (very verbose).
spew = False
# Detach the server process from the controlling terminal.
daemon = True
# Secrets are forwarded from the parent environment into the workers; a
# missing variable raises KeyError when this config module is imported.
raw_env = [
    'CSRFENCRYPTKEY=' + os.environ['CSRFENCRYPTKEY'],
    'CSRFSECRET=' + os.environ['CSRFSECRET'],
]
# raw_env.append('DEBUG=') # activate this to set workers = 1
umask = 0
# None keeps the uid/gid of the user that launched gunicorn.
user = None
group = None
| [
"rainer@hoerbe.at"
] | rainer@hoerbe.at |
a50973a8bd1678285d68efd946efa8189a37f8e9 | 39882eebbd9644851e8557507c45a55ccfb00227 | /4-Integrate/integrate.py | ce2e18b034d74746b633ce92fd5888dbee6ef6fb | [] | no_license | ayman-elkassas/Scipy-CrashCode | 8df0ffcbe8074742e90a638c4e35d273b16d83b0 | af655e7752e32430afbc5d3b8bdc2316853d1e4e | refs/heads/master | 2023-04-03T08:37:44.916335 | 2021-04-12T01:12:13 | 2021-04-12T01:12:13 | 357,019,124 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | import scipy.integrate
from numpy import exp
f= lambda x:exp(-x**2)
i = scipy.integrate.quad(f, 0, 1)
print(i) | [
"aymanelkassas88@gmail.com"
] | aymanelkassas88@gmail.com |
31629044d25bd064c858a57729c27efc7fae6dd7 | 5644bffcb3888788622732d70c4efe9766968e0b | /quotes_api/quotes_api/urls.py | d856b893bc6362c7f0b10ddfc30862ae92669895 | [] | no_license | jordanengstrom/quotes_api | ff341e852ce20f666009d2ae1967273c3e5bdcde | df2949ea7b35fede76eb2a54b96441103f4cc11e | refs/heads/main | 2023-01-01T04:17:45.458913 | 2020-10-26T19:47:30 | 2020-10-26T19:47:30 | 307,486,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 861 | py | """quotes_api URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
# URL routing table: Django matches request paths against these in order.
urlpatterns = [
    # Built-in admin site.
    path('admin/', admin.site.urls),
    # Session login/logout views for Django REST framework's browsable API.
    path('api-auth/', include('rest_framework.urls')),
    # Application endpoints, delegated to the quote app's API URLconf.
    path('api/', include('quote.api.urls')),
]
| [
"jordan.engstrom@gmail.com"
] | jordan.engstrom@gmail.com |
cbcfdc0e60f18a05779c713ed704226606269649 | 7950091dfd123b9fbe020cb8c9f529e98f7a89d8 | /weatherenv/Lib/site-packages/pipenv/vendor/backports/__init__.py | 0c64b4c10b513a109244dbb31d44bb616bdcf10c | [
"LicenseRef-scancode-python-cwi",
"Python-2.0",
"LicenseRef-scancode-free-unknown",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"MIT",
"BSD-3-Clause"
] | permissive | diyajaiswal11/Weather-App | d0000ebd12fb051cca8a4c56da4418c89714fb3e | f5e1bca505f6643d870b905577a383a10d17b026 | refs/heads/master | 2022-12-11T05:16:38.941949 | 2020-08-25T18:02:18 | 2020-08-25T18:02:18 | 231,254,913 | 3 | 0 | MIT | 2022-12-08T03:22:58 | 2020-01-01T19:25:37 | Python | UTF-8 | Python | false | false | 179 | py | __path__ = __import__('pkgutil').extend_path(__path__, __name__)
from . import weakref
from . import enum
from . import shutil_get_terminal_size
from . import functools_lru_cache
| [
"shubhijaiswal2000@gmail.com"
] | shubhijaiswal2000@gmail.com |
61e6f3a928aa35bcf99f40bd24b1eb37ecbdd5e0 | 531c47c15b97cbcb263ec86821d7f258c81c0aaf | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_07_01/operations/_local_network_gateways_operations.py | 21350c56dcb9f8c6f04581be444f0f4d9fc8807c | [
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] | permissive | YijunXieMS/azure-sdk-for-python | be364d3b88204fd3c7d223df23756386ff7a3361 | f779de8e53dbec033f98f976284e6d9491fd60b3 | refs/heads/master | 2021-07-15T18:06:28.748507 | 2020-09-04T15:48:52 | 2020-09-04T15:48:52 | 205,457,088 | 1 | 2 | MIT | 2020-06-16T16:38:15 | 2019-08-30T21:08:55 | Python | UTF-8 | Python | false | false | 25,509 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class LocalNetworkGatewaysOperations(object):
"""LocalNetworkGatewaysOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _create_or_update_initial(
self,
resource_group_name, # type: str
local_network_gateway_name, # type: str
parameters, # type: "models.LocalNetworkGateway"
**kwargs # type: Any
):
# type: (...) -> "models.LocalNetworkGateway"
cls = kwargs.pop('cls', None) # type: ClsType["models.LocalNetworkGateway"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
# Construct and send request
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'LocalNetworkGateway')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
local_network_gateway_name, # type: str
parameters, # type: "models.LocalNetworkGateway"
**kwargs # type: Any
):
# type: (...) -> LROPoller
"""Creates or updates a local network gateway in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param local_network_gateway_name: The name of the local network gateway.
:type local_network_gateway_name: str
:param parameters: Parameters supplied to the create or update local network gateway operation.
:type parameters: ~azure.mgmt.network.v2019_07_01.models.LocalNetworkGateway
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either LocalNetworkGateway or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_07_01.models.LocalNetworkGateway]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.LocalNetworkGateway"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
local_network_gateway_name=local_network_gateway_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
local_network_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.LocalNetworkGateway"
"""Gets the specified local network gateway in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param local_network_gateway_name: The name of the local network gateway.
:type local_network_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: LocalNetworkGateway, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_07_01.models.LocalNetworkGateway
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.LocalNetworkGateway"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
local_network_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
local_network_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller
"""Deletes the specified local network gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param local_network_gateway_name: The name of the local network gateway.
:type local_network_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
local_network_gateway_name=local_network_gateway_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
def _update_tags_initial(
self,
resource_group_name, # type: str
local_network_gateway_name, # type: str
parameters, # type: "models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "models.LocalNetworkGateway"
cls = kwargs.pop('cls', None) # type: ClsType["models.LocalNetworkGateway"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
# Construct and send request
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
def begin_update_tags(
self,
resource_group_name, # type: str
local_network_gateway_name, # type: str
parameters, # type: "models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> LROPoller
"""Updates a local network gateway tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param local_network_gateway_name: The name of the local network gateway.
:type local_network_gateway_name: str
:param parameters: Parameters supplied to update local network gateway tags.
:type parameters: ~azure.mgmt.network.v2019_07_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either LocalNetworkGateway or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_07_01.models.LocalNetworkGateway]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.LocalNetworkGateway"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_tags_initial(
resource_group_name=resource_group_name,
local_network_gateway_name=local_network_gateway_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["models.LocalNetworkGatewayListResult"]
"""Gets all the local network gateways in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LocalNetworkGatewayListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_07_01.models.LocalNetworkGatewayListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.LocalNetworkGatewayListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('LocalNetworkGatewayListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways'} # type: ignore
| [
"noreply@github.com"
] | YijunXieMS.noreply@github.com |
76d348dd6ae297a33d077ef1b31f5028fbf0cb36 | a8d86cad3f3cc6a977012d007d724bbaf02542f7 | /testsuites_dev/vui/sandbox/parse_test_suite.py | af2cfbb7b8f5b922afabd76a9b2df5c5256e8d27 | [] | no_license | bopopescu/bigrobot | f8d971183119a1d59f21eb2fc08bbec9ee1d522b | 24dad9fb0044df5a473ce4244932431b03b75695 | refs/heads/master | 2022-11-20T04:55:58.470402 | 2015-03-31T18:14:39 | 2015-03-31T18:14:39 | 282,015,194 | 0 | 0 | null | 2020-07-23T17:29:53 | 2020-07-23T17:29:52 | null | UTF-8 | Python | false | false | 345 | py | #!/usr/bin/env python
import sys
from robot.api import TestData
def print_suite(suite):
print 'Suite:', suite.name
for test in suite.testcase_table:
print '-', test.name
for child in suite.children: # recurse through testsuite directory
print_suite(child)
suite = TestData(source=sys.argv[1])
print_suite(suite)
| [
"vui.le@bigswitch.com"
] | vui.le@bigswitch.com |
ccc0c00ee714e842e0109aed656cd984dce4fb0a | b7add0d1b1effc50b27d3316fa5889a5227e5b19 | /Micropython/backups/tests/archieved/servo_driver_test.py | b14cc8b334a2136f39fec59f7d606d07e82b091c | [] | no_license | Woz4tetra/Atlas | efb83a7c7b2698bf8b36b023f7aa573cc38284f6 | c7380868a9efef9d1594ed7aa87187f03a7e4612 | refs/heads/master | 2020-04-04T06:25:50.657631 | 2017-04-05T01:53:15 | 2017-04-05T01:53:15 | 50,269,756 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 771 | py | import pyb
from libraries.pca9685 import ServoDriver
servo_driver = ServoDriver(2, -90, 90, 150, 600)
assert servo_driver.angle_to_pulse(-90) == 150
assert servo_driver.angle_to_pulse(90) == 600
assert servo_driver.angle_to_pulse(0) == 375
# servo_driver.servo_angle_min =
# servo_driver.servo_angle_max =
# servo_driver.servo_pulse_min =
# servo_driver.servo_pulse_max =
servo_driver.conversion = \
(servo_driver.servo_pulse_max - servo_driver.servo_pulse_min) / (
servo_driver.servo_angle_max - servo_driver.servo_angle_min)
for value in range(servo_driver.servo_angle_min,
servo_driver.servo_angle_max + 1, 10):
for servo_num in range(16):
servo_driver.set_servo(servo_num, value)
print(value)
pyb.delay(200)
| [
"woz4tetra@gmail.com"
] | woz4tetra@gmail.com |
3159f08ce73dc31e6bc9ee9d40859fdff73dd26b | 7fb87945b77d3adaedd8a155c981e97946734e41 | /cachetools/func.py | 78ec7f632964682cc6be0e5d5585a16cb22d26d7 | [] | no_license | Tony910517/openstack | 916b36368ea9f17958e4eb04bd1f9daf3aba9213 | 4c1380a03c37e7950dcf2bba794e75b7e4a8dfd0 | refs/heads/master | 2020-05-20T01:05:22.499224 | 2019-05-07T01:11:05 | 2019-05-07T01:11:05 | 185,292,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,382 | py | import collections
import functools
import random
import time
from .lfu import LFUCache
from .lru import LRUCache
from .rr import RRCache
from .ttl import TTLCache
try:
from threading import RLock
except ImportError:
from dummy_threading import RLock
_CacheInfo = collections.namedtuple('CacheInfo', [
'hits', 'misses', 'maxsize', 'currsize'
])
class _NullContext:
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
pass
_nullcontext = _NullContext()
def _makekey_untyped(args, kwargs):
return (args, tuple(sorted(kwargs.items())))
def _makekey_typed(args, kwargs):
key = _makekey_untyped(args, kwargs)
key += tuple(type(v) for v in args)
key += tuple(type(v) for _, v in sorted(kwargs.items()))
return key
def _cachedfunc(cache, typed=False, lock=None):
makekey = _makekey_typed if typed else _makekey_untyped
context = lock() if lock else _nullcontext
def decorator(func):
stats = [0, 0]
def wrapper(*args, **kwargs):
key = makekey(args, kwargs)
with context:
try:
result = cache[key]
stats[0] += 1
return result
except KeyError:
stats[1] += 1
result = func(*args, **kwargs)
with context:
try:
cache[key] = result
except ValueError:
pass # value too large
return result
def cache_info():
with context:
hits, misses = stats
maxsize = cache.maxsize
currsize = cache.currsize
return _CacheInfo(hits, misses, maxsize, currsize)
def cache_clear():
with context:
stats[:] = [0, 0]
cache.clear()
wrapper.cache_info = cache_info
wrapper.cache_clear = cache_clear
return functools.update_wrapper(wrapper, func)
return decorator
def lfu_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock):
"""Decorator to wrap a function with a memoizing callable that saves
up to `maxsize` results based on a Least Frequently Used (LFU)
algorithm.
"""
return _cachedfunc(LFUCache(maxsize, getsizeof), typed, lock)
def lru_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock):
"""Decorator to wrap a function with a memoizing callable that saves
up to `maxsize` results based on a Least Recently Used (LRU)
algorithm.
"""
return _cachedfunc(LRUCache(maxsize, getsizeof), typed, lock)
def rr_cache(maxsize=128, choice=random.choice, typed=False, getsizeof=None,
lock=RLock):
"""Decorator to wrap a function with a memoizing callable that saves
up to `maxsize` results based on a Random Replacement (RR)
algorithm.
"""
return _cachedfunc(RRCache(maxsize, choice, getsizeof), typed, lock)
def ttl_cache(maxsize=128, ttl=600, timer=time.time, typed=False,
getsizeof=None, lock=RLock):
"""Decorator to wrap a function with a memoizing callable that saves
up to `maxsize` results based on a Least Recently Used (LRU)
algorithm with a per-item time-to-live (TTL) value.
"""
return _cachedfunc(TTLCache(maxsize, ttl, timer, getsizeof), typed, lock)
| [
"471123674@qq.com"
] | 471123674@qq.com |
22fb8faa4caea83148a72dd49a86ee026fd19c42 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02659/s499786056.py | 498c72c7de83f8d871d5f5e2a21764ab75f056b6 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 86 | py | a, b = map(float, input().split())
a = round(a)
b = round(b*100)
print(a * b // 100) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
7943b6a769c17e914c502d093500da05a3a32b96 | 37482bcc34c569b2042cc4f893a92360cb5cbca6 | /shell/userinfo.py | ed06ab41df9ca3fd84acfc2b025e1ee08f2ecb10 | [] | no_license | hustmonk/k21 | 8442b7bdc6eb92282add59a4ee9166a89897d3f4 | 12279e970da57150154ef3a6343afccb8b992870 | refs/heads/master | 2016-09-05T09:22:48.506265 | 2015-07-11T13:01:33 | 2015-07-11T13:01:33 | 35,711,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,071 | py | #!/usr/bin/env python
# -*- coding: GB2312 -*-
# Last modified:
"""docstring
"""
__revision__ = '0.1'
from common import *
from weekend import *
from Object import *
class Userinfo:
def __init__(self):
self.uid_num = {}
self.uid_days = {}
for line in open("conf/user.info"):
uid,num,days = line.strip().split("\t")
self.uid_num[uid] = int(num)
self.uid_days[uid] = days.split(",")
self.week = Week()
self.obj = Obj()
def get_num(self, uid):
return self.uid_num[uid]
def get_info(self, uid):
return self.uid_days[uid]
def get_features(self, uid, course_id):
f = [0]*(CIDX_VEC_NUM+1)
for day in self.get_info(uid):
cidx = self.obj.get_index(course_id, self.week.times(day))
f[cidx] = f[cidx] + 1
f[CIDX_VEC_NUM] = self.get_num(uid)
return f
if __name__ == "__main__":
userinfo = Userinfo()
print userinfo.get_features("vCk71G02ss3o0puuBIhnOZwxNIZqe2KE", "3cnZpv6ReApmCaZyaQwi2izDZxVRdC01")
| [
"liujingminghust@163.com"
] | liujingminghust@163.com |
f0f5d2e0e0693be10cb308b2a39769d289e2ecbc | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /hoxv8zaQJNMWJqnt3_6.py | 80095df9794340c219bca34f726b867be42b5f8d | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 189 | py |
def is_heteromecic(n, test = 0):
if n==test*(test+1): return True
if test>int(n**.5): return False
return is_heteromecic(n, test+1)
# yet again, I'd hardly call this recursion...
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
fb6ce6d5ebf417aab66391b1ce2da8a5afc32d15 | 71bd623429f3b9f3701603836cf91f98436d48a7 | /tests/test_compute_embeddings.py | 0bfee0d2862e82494e0f8b5576c22f2b73997d55 | [
"Apache-2.0"
] | permissive | xiaobiaohust/sentence-transformers | b9f7d41901ef3159cb933e3d7d4f2e7698503975 | 167e4567670d711ef543239d0b922858c796a2fc | refs/heads/master | 2023-03-23T05:46:18.989786 | 2021-03-17T21:20:15 | 2021-03-17T21:20:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,870 | py | """
Computes embeddings
"""
import unittest
from sentence_transformers import SentenceTransformer
import numpy as np
class ComputeEmbeddingsTest(unittest.TestCase):
def setUp(self):
self.model = SentenceTransformer('paraphrase-distilroberta-base-v1')
def test_encode_token_embeddings(self):
"""
Test that encode(output_value='token_embeddings') works
:return:
"""
sent = ["Hello Word, a test sentence", "Here comes another sentence", "My final sentence", "Sentences", "Sentence five five five five five five five"]
emb = self.model.encode(sent, output_value='token_embeddings', batch_size=2)
assert len(emb) == len(sent)
for s, e in zip(sent, emb):
assert len(self.model.tokenize([s])['input_ids'][0]) == e.shape[0]
def test_encode_single_sentences(self):
#Single sentence
emb = self.model.encode("Hello Word, a test sentence")
assert emb.shape == (768,)
assert abs(np.sum(emb) - 7.9811716) < 0.001
# Single sentence as list
emb = self.model.encode(["Hello Word, a test sentence"])
assert emb.shape == (1, 768)
assert abs(np.sum(emb) - 7.9811716) < 0.001
# Sentence list
emb = self.model.encode(["Hello Word, a test sentence", "Here comes another sentence", "My final sentence"])
assert emb.shape == (3, 768)
assert abs(np.sum(emb) - 22.968266) < 0.001
def test_encode_normalize(self):
emb = self.model.encode(["Hello Word, a test sentence", "Here comes another sentence", "My final sentence"], normalize_embeddings=True)
assert emb.shape == (3, 768)
for norm in np.linalg.norm(emb, axis=1):
assert abs(norm - 1) < 0.001
def test_encode_tuple_sentences(self):
# Input a sentence tuple
emb = self.model.encode([("Hello Word, a test sentence", "Second input for model")])
assert emb.shape == (1, 768)
assert abs(np.sum(emb) - 9.503508) < 0.001
# List of sentence tuples
emb = self.model.encode([("Hello Word, a test sentence", "Second input for model"), ("My second tuple", "With two inputs"), ("Final tuple", "final test")])
assert emb.shape == (3, 768)
assert abs(np.sum(emb) - 32.14627) < 0.001
def test_multi_gpu_encode(self):
# Start the multi-process pool on all available CUDA devices
pool = self.model.start_multi_process_pool(['cpu', 'cpu'])
sentences = ["This is sentence {}".format(i) for i in range(1000)]
# Compute the embeddings using the multi-process pool
emb = self.model.encode_multi_process(sentences, pool, chunk_size=50)
assert emb.shape == (1000, 768)
emb_normal = self.model.encode(sentences)
diff = np.sum(np.abs(emb - emb_normal))
assert diff < 0.001
| [
"rnils@web.de"
] | rnils@web.de |
ed8a1eaea1d7b77ed9b4b067104c6a228d5336a4 | 6a6984544a4782e131510a81ed32cc0c545ab89c | /src/simprod-scripts/resources/tests/generators/nugen-generator.py | 934c279088774b490b79df7b1f9a5806373d3362 | [] | no_license | wardVD/IceSimV05 | f342c035c900c0555fb301a501059c37057b5269 | 6ade23a2fd990694df4e81bed91f8d1fa1287d1f | refs/heads/master | 2020-11-27T21:41:05.707538 | 2016-09-02T09:45:50 | 2016-09-02T09:45:50 | 67,210,139 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,138 | py | #!/usr/bin/env python
"""Ensure that the NuGen API hasn't changed (too much)"""
import os
import tempfile
import shutil
from icecube.simprod.modules.nugen import NuGen
from icecube import icetray, dataclasses, dataio
from I3Tray import I3Tray
try:
tmpdir = tempfile.mkdtemp(dir=os.getcwd())
tmpfile = os.path.join(tmpdir,'test.i3')
summaryfile = os.path.join(tmpdir,'summary.xml')
gcdfile = os.path.expandvars('$I3_TESTDATA/sim/GCD.i3.gz')
# make a very small nugen file
n = NuGen()
n.SetParameter('nevents',1)
n.SetParameter('outputfile',tmpfile)
n.SetParameter('summaryfile',summaryfile)
n.SetParameter('gcdfile',gcdfile)
n.SetParameter('mjd',55697)
n.SetParameter('NuFlavor','NuMu')
if n.Execute({}) != 0:
raise Exception('NuGen did not return OK')
# now check generated file
tray = I3Tray()
tray.Add('I3Reader', filename=tmpfile)
def checky(frame):
assert('NuGPrimary' in frame)
assert('I3MCTree' in frame)
tray.Add(checky, Streams=[icetray.I3Frame.DAQ])
tray.Execute()
tray.Finish()
finally:
shutil.rmtree(tmpdir)
| [
"wardvandriessche@gmail.com"
] | wardvandriessche@gmail.com |
ef990efbcc159fa01bb54f036ae7fdee2768ce9c | b67958ebbde7538f6c5dc0305ed278f7c1a9528a | /networking_tn/common/config.py | 4d6a5bfa7757d4899831213ab593e0c75d393d7d | [
"Apache-2.0"
] | permissive | xjforfuture/networking-ngfw | 4c6b04ede370ba9888422715d5a4be7858335fe0 | 26fa3aa94e0ae733dde47c82f3372afeb07ff24b | refs/heads/master | 2021-05-15T06:34:12.658302 | 2017-12-12T07:01:31 | 2017-12-12T07:01:31 | 113,798,673 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,819 | py | # Copyright 2015 Fortinet Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from fortiosclient import client
from oslo_config import cfg
from networking_fortinet._i18n import _
ML2_FORTINET = [
cfg.StrOpt('address', default='',
help=_('The address of fortigates to connect to')),
cfg.StrOpt('port', default='443',
help=_('The FGT port to serve API requests')),
cfg.StrOpt('protocol', default='https',
help=_('The FGT uses which protocol: http or https')),
cfg.StrOpt('username', default='admin',
help=_('The username used to login')),
cfg.StrOpt('password', default='password', secret=True,
help=_('The password used to login')),
cfg.StrOpt('int_interface', default='internal',
help=_('The interface to serve tenant network')),
cfg.StrOpt('ext_interface', default='',
help=_('The interface to the external network')),
cfg.StrOpt('tenant_network_type', default='vlan',
help=_('tenant network type, default is vlan')),
cfg.StrOpt('vlink_vlan_id_range', default='3500:4000',
help=_('vdom link vlan interface, default is 3500:4000')),
cfg.StrOpt('vlink_ip_range', default='169.254.0.0/20',
help=_('vdom link interface IP range, '
'default is 169.254.0.0/20')),
cfg.StrOpt('vip_mappedip_range', default='169.254.128.0/23',
help=_('The intermediate IP range in floating IP process, '
'default is 169.254.128.0/23')),
cfg.BoolOpt('npu_available', default=True,
help=_('If npu_available is True, it requires hardware FGT'
'with NPU, default is True')),
cfg.BoolOpt('enable_default_fwrule', default=False,
help=_('If True, fwaas will add a deny all rule automatically,'
' otherwise users need to add it manaully.')),
cfg.StrOpt('av_profile', default=None,
help=_('Assign a default antivirus profile in FWaaS, '
'the profile must exist in FGT, default is ""')),
cfg.StrOpt('webfilter_profile', default=None,
help=_('Assign a default web filter profile in FWaaS, '
'the profile must exist in FGT, default is ""')),
cfg.StrOpt('ips_sensor', default=None,
help=_('Assign a default IPS profile in FWaaS, '
'the profile must exist in FGT, default is ""')),
cfg.StrOpt('application_list', default=None,
help=_('Assign a default application control profile in FWaaS,'
' the profile must exist in FGT, default is ""')),
cfg.StrOpt('ssl_ssh_profile', default=None,
help=_('Assign a default SSL/SSH inspection profile in FWaaS, '
'the profile must exist in FGT, default is ""'))
]
cfg.CONF.register_opts(ML2_FORTINET, "ml2_fortinet")
fgt_info = {
'address': cfg.CONF.ml2_fortinet.address,
'port': cfg.CONF.ml2_fortinet.port,
'protocol': cfg.CONF.ml2_fortinet.protocol,
'username': cfg.CONF.ml2_fortinet.username,
'password': cfg.CONF.ml2_fortinet.password,
'int_interface': cfg.CONF.ml2_fortinet.int_interface,
'ext_interface': cfg.CONF.ml2_fortinet.ext_interface,
'tenant_network_type': cfg.CONF.ml2_fortinet.tenant_network_type,
'vlink_vlan_id_range': cfg.CONF.ml2_fortinet.vlink_vlan_id_range,
'vlink_ip_range': cfg.CONF.ml2_fortinet.vlink_ip_range,
'vip_mappedip_range': cfg.CONF.ml2_fortinet.vip_mappedip_range,
'npu_available': cfg.CONF.ml2_fortinet.npu_available,
'enable_default_fwrule': cfg.CONF.ml2_fortinet.enable_default_fwrule,
'av_profile': cfg.CONF.ml2_fortinet.av_profile,
'webfilter_profile': cfg.CONF.ml2_fortinet.webfilter_profile,
'ips_sensor': cfg.CONF.ml2_fortinet.ips_sensor,
'application_list': cfg.CONF.ml2_fortinet.application_list,
'ssl_ssh_profile': cfg.CONF.ml2_fortinet.ssl_ssh_profile
}
def get_apiclient():
"""Fortinet api client initialization."""
api_server = [(fgt_info['address'], fgt_info['port'],
'https' == fgt_info['protocol'])]
return client.FortiosApiClient(
api_server, fgt_info['username'], fgt_info['password'])
| [
"xjforfuture@163.com"
] | xjforfuture@163.com |
93120f4f678d41f66a3161ce124689235c26903b | 0c6100dc16291986fab157ed0437f9203f306f1b | /2000- 3000/2712.py | 1116e8cbeb79eb95c5222b1816185b7230018b3d | [] | no_license | Matuiss2/URI-ONLINE | 4c93c139960a55f7cc719d0a3dcd6c6c716d3924 | 6cb20f0cb2a6d750d58b826e97c39c11bf8161d9 | refs/heads/master | 2021-09-17T09:47:16.209402 | 2018-06-30T08:00:14 | 2018-06-30T08:00:14 | 110,856,303 | 13 | 1 | null | null | null | null | UTF-8 | Python | false | false | 689 | py | import re
loops = int(input())
for i in range(loops):
data = input()
placa = re.compile('([A-Z]{3})-([0-9]{4})') # vê se segue o formato AAA-9999
# Tem que checar o tamanho pois placas como AAA-9999x também são aceitas pelo regex
if placa.match(data) and len(data) == 8:
ultimo = data[-1]
if ultimo in ["1", "2"]:
print("MONDAY")
elif ultimo in ["3", "4"]:
print("TUESDAY")
elif ultimo in ["5", "6"]:
print("WEDNESDAY")
elif ultimo in ["7", "8"]:
print("THURSDAY")
elif ultimo in ["0", "9"]:
print("FRIDAY")
else:
print("FAILURE") | [
"noreply@github.com"
] | Matuiss2.noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.