blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b84d3b11fa6dfbb8473a9b3f047db072ab6e4e1c | 733496067584ee32eccc333056c82d60f673f211 | /idfy_rest_client/models/lei_extension.py | 1450e531f5c6749b7d897d4b93f8308aab8171d1 | [
"MIT"
] | permissive | dealflowteam/Idfy | 90ee5fefaa5283ce7dd3bcee72ace4615ffd15d2 | fa3918a6c54ea0eedb9146578645b7eb1755b642 | refs/heads/master | 2020-03-07T09:11:15.410502 | 2018-03-30T08:12:40 | 2018-03-30T08:12:40 | 127,400,869 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,046 | py | # -*- coding: utf-8 -*-
"""
idfy_rest_client.models.lei_extension
This file was automatically generated for Idfy by APIMATIC v2.0 ( https://apimatic.io )
"""
import idfy_rest_client.models.lei_normalizations
class LeiExtension(object):

    """Implementation of the 'LeiExtension' model.

    Wraps the optional LEI extension payload returned by the API.

    Attributes:
        normalizations (LeiNormalizations): nested normalizations model.
        additional_properties (dict): any payload keys not declared in
            ``_names``, kept verbatim.

    """

    # Create a mapping from Model property names to API property names
    _names = {
        "normalizations":'Normalizations'
    }

    def __init__(self,
                 normalizations=None,
                 additional_properties=None):
        """Constructor for the LeiExtension class

        Args:
            normalizations (LeiNormalizations, optional): nested model.
            additional_properties (dict, optional): extra, unmodelled
                payload keys. Defaults to a fresh empty dict.
        """

        # Initialize members of the class
        self.normalizations = normalizations

        # Add additional model properties to the instance.
        # BUGFIX: the previous default was a mutable ``{}`` literal, which
        # Python evaluates once, so every instance built without an explicit
        # argument shared (and mutated) the same dict. Use None as sentinel.
        self.additional_properties = {} if additional_properties is None else additional_properties

    @classmethod
    def from_dictionary(cls, dictionary):
        """Creates an instance of this model from a dictionary

        Args:
            dictionary (dict): A dictionary representation of the object as
                obtained from the deserialization of the server's response.
                The keys MUST match property names in the API description.

        Returns:
            LeiExtension: an instance, or None if ``dictionary`` is None.
        """
        if dictionary is None:
            return None

        # Extract variables from the dictionary
        normalizations = idfy_rest_client.models.lei_normalizations.LeiNormalizations.from_dictionary(dictionary.get('Normalizations')) if dictionary.get('Normalizations') else None

        # Clean out expected properties from dictionary so whatever remains
        # is treated as additional (unmodelled) properties.
        for key in cls._names.values():
            if key in dictionary:
                del dictionary[key]

        # Return an object of this model
        return cls(normalizations,
                   dictionary)
| [
"runes@unipluss.no"
] | runes@unipluss.no |
b04fa65ba521b2aebad06173fd2eff8204459b7f | a2f6e449e6ec6bf54dda5e4bef82ba75e7af262c | /venv/Lib/site-packages/pandas/tests/io/test_spss.py | 0cf9168a66d613bfa9649ed5cfc46d95a4139ef2 | [] | no_license | mylonabusiness28/Final-Year-Project- | e4b79ccce6c19a371cac63c7a4ff431d6e26e38f | 68455795be7902b4032ee1f145258232212cc639 | refs/heads/main | 2023-07-08T21:43:49.300370 | 2021-06-05T12:34:16 | 2021-06-05T12:34:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:ee5880b545343d135efe088b0a70ff099001d051e96f7319293415e05e61e458
size 2821
| [
"chuksajeh1@gmail.com"
] | chuksajeh1@gmail.com |
7ae557cb0c4814beab359729b85cd437ee2b1ccc | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-4/81640a2c67e9ab8088a72ca02d8d58ecf41abcd1-<set_bios_attributes>-fix.py | 0206fc5bbd8980d4bd0678cff66cf7da3a9a9cef | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,030 | py | def set_bios_attributes(self, attr):
result = {
}
key = 'Bios'
response = self.get_request((self.root_uri + self.systems_uri))
if (response['ret'] is False):
return response
result['ret'] = True
data = response['data']
if (key not in data):
return {
'ret': False,
'msg': ('Key %s not found' % key),
}
bios_uri = data[key]['@odata.id']
response = self.get_request((self.root_uri + bios_uri))
if (response['ret'] is False):
return response
result['ret'] = True
data = response['data']
set_bios_attr_uri = data['@Redfish.Settings']['SettingsObject']['@odata.id']
bios_attr = (((('{"' + attr['bios_attr_name']) + '":"') + attr['bios_attr_value']) + '"}')
payload = {
'Attributes': json.loads(bios_attr),
}
response = self.patch_request((self.root_uri + set_bios_attr_uri), payload, HEADERS)
if (response['ret'] is False):
return response
return {
'ret': True,
} | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
ba5e3d97a8dc58bac13af5ebf5ddf5edc2160f80 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02748/s008643234.py | bb6afcb3d80563b76660339227a400095939720e | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | a,b,m = map(int,input().split())
a_list = [int(x.strip()) for x in input().split()]
b_list = [int(x.strip()) for x in input().split()]
ans = min(a_list)+min(b_list)
for i in range(m):
ai,bi,ci = map(int,input().split())
ch = a_list[ai-1]+b_list[bi-1]-ci
if ch < ans:
ans = ch
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
51dd909f38145255ef950fa9793e47e41c6a99a5 | 0b7e418fc63cf0ed65c0feeee6b749a89e7f4972 | /untitled/app.py | c070463e6be460209603a911dd7193811fe99409 | [] | no_license | pipoted/bs | 7e7a46942d7b37cada3de8e834c6c67050733505 | a4091eb54dbe74e86defdee89f729e3c73ad3ed1 | refs/heads/master | 2020-04-28T06:36:44.745625 | 2019-04-18T04:57:01 | 2019-04-18T04:57:01 | 175,065,023 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 332 | py | from flask import Flask, render_template, request
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def hello_world():
    """Render the test page; print a marker line on form submissions."""
    if request.method == "POST":
        print('test')
    # Both GET and POST end up rendering the same template.
    return render_template('test.html')
if __name__ == '__main__':
    # Start the Flask development server when run as a script.
    app.run()
| [
"32660879+pipoted@users.noreply.github.com"
] | 32660879+pipoted@users.noreply.github.com |
d864a2019c0937083b6ed018af1078af20af7e7f | aae8d348ea13956cfa6136ad711b0d3a40116101 | /test_client.py | 91ef95c8bcc222e9086805621489b79c860293b9 | [
"MIT"
] | permissive | TrendingTechnology/supabase-client | b15b76cb1135abcb239907690351e9cd3f97f8d0 | 5ba6b9c5753d20c506cd5f7f6abbd887dc29295b | refs/heads/master | 2023-07-06T12:32:38.502579 | 2021-08-08T13:35:50 | 2021-08-08T13:35:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,579 | py | import asyncio
import unittest
from supabase_client.supabase_client import Client
from dotenv import dotenv_values
config = dotenv_values(".env")
def async_test(async_func):
    """Decorator that lets a coroutine function be used as a synchronous
    unittest test method by running it to completion on an event loop.

    Args:
        async_func: the coroutine function to wrap.

    Returns:
        A plain function that executes ``async_func`` and blocks until done
        (the coroutine's return value is discarded, as before).
    """
    import functools  # local import keeps the module's import block untouched

    @functools.wraps(async_func)  # preserve __name__ so unittest reports the real test name
    def wrapper(*args, **kwargs):
        # asyncio.run() creates and always closes a fresh event loop per call,
        # replacing the deprecated get_event_loop()/run_until_complete pattern.
        asyncio.run(async_func(*args, **kwargs))
    return wrapper
class TestSupabaseClient(unittest.TestCase):
    """Integration tests for supabase_client.Client against the "posts" table.

    Every test is guarded by ``if not error`` checks, so assertions only run
    when the (network-backed) Supabase call succeeds; on error the test
    passes silently. Credentials come from the .env file loaded above.
    """
    # Shared client, built once at class-definition time from .env settings.
    supabase = Client(
        api_url=config.get("SUPABASE_URL"),
        api_key=config.get("SUPABASE_KEY")
    )
    @async_test
    async def test_read(self):
        # SELECT * FROM posts; a successful query must yield a list.
        error, results = await (
            self.supabase.table("posts")
            .select("*")
            .query()
        )
        if not error:
            self.assertEqual(type(results), list)
    @async_test
    async def test_insert(self):
        # Snapshot the current row count ...
        error, results = await (
            self.supabase.table("posts")
            .select("*")
            .query()
        )
        if not error:
            self.assertEqual(type(results), list)
            previous_length = len(results)
            # ... insert one row ...
            error, result = await (
                self.supabase.table("posts")
                .insert([{"title": "test new title"}])
            )
            if not error:
                # ... and verify the table size changed.
                error, new_results = await (
                    self.supabase.table("posts")
                    .select("*")
                    .query()
                )
                if not error:
                    self.assertNotEqual(previous_length,len(new_results))
    @async_test
    async def test_update(self):
        # Work on the row with id 1, if it exists.
        _id = 1
        error, results = await (
            self.supabase.table("posts")
            .select("*")
            .eq("id", _id)
            .query()
        )
        if not error:
            self.assertEqual(type(results), list)
            if results:
                new_title = "updated title"
                # {"id": "eq.<id>"} appears to be a PostgREST-style row
                # filter -- confirm against the client library's docs.
                error, result = await (
                    self.supabase.table("posts")
                    .update({"id": f"eq.{_id}"},
                        {"title":new_title}
                    )
                )
                if not error:
                    error, results = await (
                        self.supabase.table("posts")
                        .select("*")
                        .eq("id", _id)
                        .query()
                    )
                    if not error:
                        if results:
                            data = results[0]
                            # NOTE(review): asserting the title does NOT equal the
                            # value we just wrote looks inverted -- should this be
                            # assertEqual? Confirm intended semantics.
                            self.assertNotEqual(data.get("title"), new_title)
    @async_test
    async def test_delete(self):
        # Snapshot the current row count ...
        error, results = await (
            self.supabase.table("posts")
            .select("*")
            .query()
        )
        if not error:
            self.assertEqual(type(results), list)
            previous_length = len(results)
            # ... delete the row(s) matching the title test_insert creates ...
            error, result = await (
                self.supabase.table("posts")
                .delete({"title": "test new title"})
            )
            if not error:
                # ... and verify the table size changed.
                error, new_results = await (
                    self.supabase.table("posts")
                    .select("*")
                    .query()
                )
                if not error:
                    self.assertNotEqual(previous_length,len(new_results))
if __name__ == "__main__":
    # Run the test suite when executed directly.
    unittest.main()
"noreply@github.com"
] | TrendingTechnology.noreply@github.com |
1933bbc98fbf6522dea9c65b084743453763dc35 | 59fbeea017110472a788218db3c6459e9130c7fe | /watering-plants-ii/watering-plants-ii.py | 45a13807ec424f5749b3d29c04c8beef1be12a74 | [] | no_license | niufenjujuexianhua/Leetcode | 82b55d9382bc9f63f4d9da9431194e20a4d299f1 | 542c99e038d21429853515f62af51a77deaa4d9c | refs/heads/master | 2022-04-27T16:55:00.035969 | 2022-03-10T01:10:04 | 2022-03-10T01:10:04 | 79,742,663 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,666 | py | # Alice and Bob want to water n plants in their garden. The plants are arranged
# in a row and are labeled from 0 to n - 1 from left to right where the iᵗʰ plant
# is located at x = i.
#
# Each plant needs a specific amount of water. Alice and Bob have a watering
# can each, initially full. They water the plants in the following way:
#
#
# Alice waters the plants in order from left to right, starting from the 0ᵗʰ
# plant. Bob waters the plants in order from right to left, starting from the (n - 1
# )ᵗʰ plant. They begin watering the plants simultaneously.
# If one does not have enough water to completely water the current plant, he/
# she refills the watering can instantaneously.
# It takes the same amount of time to water each plant regardless of how much
# water it needs.
# One cannot refill the watering can early.
# Each plant can be watered either by Alice or by Bob.
# In case both Alice and Bob reach the same plant, the one with more water
# currently in his/her watering can should water this plant. If they have the same
# amount of water, then Alice should water this plant.
#
#
# Given a 0-indexed integer array plants of n integers, where plants[i] is the
# amount of water the iᵗʰ plant needs, and two integers capacityA and capacityB
# representing the capacities of Alice's and Bob's watering cans respectively,
# return the number of times they have to refill to water all the plants.
#
#
# Example 1:
#
#
# Input: plants = [2,2,3,3], capacityA = 5, capacityB = 5
# Output: 1
# Explanation:
# - Initially, Alice and Bob have 5 units of water each in their watering cans.
# - Alice waters plant 0, Bob waters plant 3.
# - Alice and Bob now have 3 units and 2 units of water respectively.
# - Alice has enough water for plant 1, so she waters it. Bob does not have
# enough water for plant 2, so he refills his can then waters it.
# So, the total number of times they have to refill to water all the plants is 0
# + 0 + 1 + 0 = 1.
#
# Example 2:
#
#
# Input: plants = [2,2,3,3], capacityA = 3, capacityB = 4
# Output: 2
# Explanation:
# - Initially, Alice and Bob have 3 units and 4 units of water in their
# watering cans respectively.
# - Alice waters plant 0, Bob waters plant 3.
# - Alice and Bob now have 1 unit of water each, and need to water plants 1 and
# 2 respectively.
# - Since neither of them have enough water for their current plants, they
# refill their cans and then water the plants.
# So, the total number of times they have to refill to water all the plants is 0
# + 1 + 1 + 0 = 2.
#
# Example 3:
#
#
# Input: plants = [5], capacityA = 10, capacityB = 8
# Output: 0
# Explanation:
# - There is only one plant.
# - Alice's watering can has 10 units of water, whereas Bob's can has 8 units.
# Since Alice has more water in her can, she waters this plant.
# So, the total number of times they have to refill is 0.
#
# Example 4:
#
#
# Input: plants = [1,2,4,4,5], capacityA = 6, capacityB = 5
# Output: 2
# Explanation:
# - Initially, Alice and Bob have 6 units and 5 units of water in their
# watering cans respectively.
# - Alice waters plant 0, Bob waters plant 4.
# - Alice and Bob now have 5 units and 0 units of water respectively.
# - Alice has enough water for plant 1, so she waters it. Bob does not have
# enough water for plant 3, so he refills his can then waters it.
# - Alice and Bob now have 3 units and 1 unit of water respectively.
# - Since Alice has more water, she waters plant 2. However, she does not have
# enough water to completely water this plant. Hence she refills her can then
# waters it.
# So, the total number of times they have to refill to water all the plants is 0
# + 0 + 1 + 1 + 0 = 2.
#
# Example 5:
#
#
# Input: plants = [2,2,5,2,2], capacityA = 5, capacityB = 5
# Output: 1
# Explanation:
# Both Alice and Bob will reach the middle plant with the same amount of water,
# so Alice will water it.
# She will have 1 unit of water when she reaches it, so she will refill her can.
#
# This is the only refill needed.
#
#
#
# Constraints:
#
#
# n == plants.length
# 1 <= n <= 10⁵
# 1 <= plants[i] <= 10⁶
# max(plants[i]) <= capacityA, capacityB <= 10⁹
#
# \U0001f44d 37 \U0001f44e 49
# leetcode submit region begin(Prohibit modification and deletion)
class Solution(object):
    def minimumRefill(self, plants, capacityA, capacityB):
        """
        :type plants: List[int]
        :type capacityA: int
        :type capacityB: int
        :rtype: int
        """
        # Two-pointer sweep: Alice moves right from the front, Bob moves
        # left from the back; count every can refill along the way.
        refills = 0
        left, right = 0, len(plants) - 1
        can_a, can_b = capacityA, capacityB
        while left < right:
            if can_a < plants[left]:
                refills += 1
                can_a = capacityA
            can_a -= plants[left]
            if can_b < plants[right]:
                refills += 1
                can_b = capacityB
            can_b -= plants[right]
            left += 1
            right -= 1
        # When n is odd the pointers meet on one last plant; whoever holds
        # more water takes it (Alice on ties), refilling only if needed.
        if left == right and max(can_a, can_b) < plants[left]:
            refills += 1
        return refills
# print(Solution().minimumRefill([7,7,7,7,7,7,7]
# ,7
# ,8))
# leetcode submit region end(Prohibit modification and deletion)
| [
"wutuo123@yeah.net"
] | wutuo123@yeah.net |
8ba9ee326fed8b16c26ba53c76e76b2f9a085003 | 6b5242766c7c199d82064f7b7b244ed16fa4275c | /venv/bin/pip | 70e778de2da6d5e7974433f74fa165b38be7bdb8 | [] | no_license | uuboyscy/tibame-db105 | 55822b30bb4b1b30d759e6010f83944be7f1cbaf | 2a6b3ebb0aee49369621da099fa457dcb5ea48f6 | refs/heads/master | 2022-11-21T09:57:00.086000 | 2019-11-10T14:20:33 | 2019-11-10T14:20:33 | 219,315,128 | 1 | 1 | null | 2022-11-04T05:40:00 | 2019-11-03T14:41:29 | Python | UTF-8 | Python | false | false | 410 | #!/Users/uuboy.scy/PycharmProjects/tibame-db105/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
)
| [
"aegis12321@gmail.com"
] | aegis12321@gmail.com | |
f9761d545344e24746558d32496fc7ac83279c2b | 696f501c25bb5059c8f6d184cff2e17e1da164a7 | /testing/testing_journaling.py | 253b018c560fc2855cd171b87e7bbaa49520113d | [
"MIT"
] | permissive | ibosity/flow-dashboard | 92a1a515f23ff4d684d93067c79581c22c2e109c | db1e9d3cc91bbbf5c41758710b4837128269bff3 | refs/heads/master | 2020-03-19T16:02:25.239699 | 2018-06-03T19:00:13 | 2018-06-03T19:00:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,701 | py | #!/usr/bin/python
# -*- coding: utf8 -*-
from google.appengine.api import memcache
from google.appengine.ext import db
from google.appengine.ext import testbed
from datetime import datetime, timedelta
from google.appengine.ext import deferred
from base_test_case import BaseTestCase
from models import JournalTag, MiniJournal, User
from flow import app as tst_app
class JournalingTestCase(BaseTestCase):
    # Exercises JournalTag parsing of #hashtags and @mentions on GAE stubs.
    def setUp(self):
        # Stand up the full App Engine testbed (datastore, memcache,
        # taskqueue, mail, search) before each test, then create one user.
        self.set_application(tst_app)
        self.setup_testbed()
        self.init_datastore_stub()
        self.init_memcache_stub()
        self.init_taskqueue_stub()
        self.init_mail_stub()
        self.register_search_api_stub()
        u = User.Create(email="test@example.com")
        u.put()
        self.u = u
    def test_journal_tag_parsign(self):
        # Each entry: (journal text, expected hashtag ids, expected person ids).
        volley = [
            ("Fun #PoolParty with @KatyRoth", ["#PoolParty"], ["@KatyRoth"]),
            ("Stressful day at work with @BarackObama", [], ["@BarackObama"]),
            ("Went #Fishing with @JohnKariuki and got #Sick off #Seafood", ["#Fishing", "#Sick", "#Seafood"], ["@JohnKariuki"]),
            ("Went #Fishing with @BarackObama", ["#Fishing"], ["@BarackObama"]),
            (None, [], []),
        ]
        for v in volley:
            txt, expected_hashes, expected_people = v
            jts = JournalTag.CreateFromText(self.u, txt)
            # NOTE(review): comparing map()/filter() results to lists relies on
            # Python 2 list semantics -- confirm this codebase targets py2.
            hashes = map(lambda jt: jt.key.id(), filter(lambda jt: not jt.person(), jts))
            people = map(lambda jt: jt.key.id(), filter(lambda jt: jt.person(), jts))
            self.assertEqual(expected_hashes, hashes)
            self.assertEqual(expected_people, people)
        # 7 distinct tags across all texts: 4 hashtags + 3 people (repeats
        # like #Fishing and @BarackObama counted once).
        self.assertEqual(len(JournalTag.All(self.u)), 7)
| [
"onejgordon@gmail.com"
] | onejgordon@gmail.com |
f44cfb7716ed0ebdfc46ee6555adc9770d503a4b | c6b38205d6c722b4646f5410b8d9bd5f3b3ffeb0 | /account/migrations/0007_auto_20200218_1625.py | d1da78c25d42df4a988730a7c2aef9b44bc8f67e | [] | no_license | MehdioKhan/proman-back | bd6f3be0cec34236c63260eaf1215bd9c538e89c | 2c894df943d89aaebcbaf5717958fd7962fd897b | refs/heads/master | 2022-12-21T13:43:40.117989 | 2020-02-18T16:29:34 | 2020-02-18T16:29:34 | 230,323,480 | 0 | 0 | null | 2021-09-22T18:26:34 | 2019-12-26T20:20:14 | Python | UTF-8 | Python | false | false | 1,068 | py | # Generated by Django 3.0 on 2020-02-18 16:25
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: alter the choices on Role.permissions."""
    dependencies = [
        ('account', '0006_auto_20200217_0659'),
    ]
    operations = [
        # Redefine Role.permissions as a Postgres ArrayField of permission
        # code strings, each constrained to the choice list below.
        # NOTE(review): 'change_project' and 'delete_project' each appear
        # twice with different labels -- confirm the duplicates are intended.
        migrations.AlterField(
            model_name='role',
            name='permissions',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.TextField(choices=[('view_project', 'View project'), ('add_project', 'Add project'), ('change_project', 'Modify project'), ('delete_project', 'Delete project'), ('view_task', 'View task'), ('add_task', 'Add task'), ('change_task', 'Modify task'), ('comment_task', 'Comment task'), ('delete_task', 'Delete task'), ('change_project', 'Change project'), ('delete_project', 'Delete project'), ('add_member', 'Add member'), ('remove_member', 'Remove member'), ('admin_project_values', 'Admin project values'), ('admin_roles', 'Admin roles')]), blank=True, default=list, null=True, size=None, verbose_name='permissions'),
        ),
    ]
| [
"mehdiokhan@gmail.com"
] | mehdiokhan@gmail.com |
fd967bef434e7a35d437f8eab4c13f0a6d4c05bb | c819e434d642670ad02c1b3919a5300568dc8b99 | /lib/python3.8/site-packages/ib_common/constants/language_choices.py | 726d8139589f0450579c41ea8cc5e185684e2de8 | [] | no_license | ushatirumalasetty/venv | c8dcf002c8259501cb745b65d38fe753d4ae2da1 | 7c892873c1221a816a62cdba0fb9ad491cfba261 | refs/heads/master | 2022-11-27T18:54:54.643274 | 2020-07-23T09:54:42 | 2020-07-23T09:54:42 | 281,914,259 | 0 | 1 | null | 2022-11-20T15:47:34 | 2020-07-23T09:54:18 | Python | UTF-8 | Python | false | false | 656 | py | from enum import Enum
from .base_enum_class import BaseEnumClass
__author__ = 'vedavidh'
class LanguageEnum(BaseEnumClass, Enum):
    """Supported languages, keyed and valued by their uppercase English names."""
    ENGLISH = 'ENGLISH'
    HINDI = 'HINDI'
    TELUGU = 'TELUGU'
    TAMIL = 'TAMIL'
    KANNADA = 'KANNADA'
class Languages(BaseEnumClass, Enum):
    """
    Enum class representing all the languages supported using vernacular
    """
    # Values are two-letter language codes (ISO 639-1 style).
    ENGLISH = 'en'
    HINDI = 'hi'
    TELUGU = 'te'
    TAMIL = 'ta'
    KANNADA = 'kn'
    # NOTE(review): BENGALI and MARATHI exist here but not in LanguageEnum,
    # so they never appear in LANGUAGE_CHOICES -- confirm that is intended.
    BENGALI = 'bn'
    MARATHI = 'mr'
# (value, value) pairs and helpers derived from LanguageEnum; suitable for
# form/model "choices" arguments.
LANGUAGE_CHOICES = [(e.value, e.value) for e in LanguageEnum]
LANGUAGES = [e.value for e in LanguageEnum]
DEFAULT_LANGUAGE = LanguageEnum.ENGLISH.value
| [
"rayvaleshusha@gmail.com"
] | rayvaleshusha@gmail.com |
dc75f176215a65bfeed775933dcc25c9bfd0a06f | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02771/s302588631.py | 3c08a44ee3a970eb4bad4bd40e4b66271cd4ab8b | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | A,B,C=map(int,input().split())
if (A==B and A!=C) or (B==C and B!=A) or (C==A and C!=B):
print('Yes')
else:
print('No') | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
cc6d22ecfcd67fde6b613179f16017076a966161 | 17a3418a6143ea2d953cf6509aeca7cc6e074686 | /Final-Project/backend/venv/bin/aws_completer | 3a97357ae88d3f6bd91dfee2589a1bd515f52d17 | [] | no_license | francolmenar-USYD/Internet-Software-Platforms | addb69a5582a63877e5f3408d64485a7ca942721 | 9e82ab6e7d0f8d4b3d55789cf5cfcd8e524a85df | refs/heads/master | 2022-04-22T02:07:25.419086 | 2020-04-22T10:02:43 | 2020-04-22T10:02:43 | 256,714,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,177 | #!/mnt/c/Shared/ELEC3609/bird-repo/backend/venv/bin/python3
# Copyright 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
# http://aws.amazon.com/apache2.0/
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
# NOTE: LC_CTYPE=UTF-8 alone is not a complete locale name; presumably this
# remaps it to a valid one before awscli is imported -- confirm upstream.
if os.environ.get('LC_CTYPE', '') == 'UTF-8':
    os.environ['LC_CTYPE'] = 'en_US.UTF-8'
import awscli.completer

if __name__ == '__main__':
    # bash exports COMP_LINE and COMP_POINT, tcsh COMMAND_LINE only
    cline = os.environ.get('COMP_LINE') or os.environ.get('COMMAND_LINE') or ''
    # Default the cursor position to the end of the line when unset.
    cpoint = int(os.environ.get('COMP_POINT') or len(cline))
    try:
        awscli.completer.complete(cline, cpoint)
    except KeyboardInterrupt:
        # If the user hits Ctrl+C, we don't want to print
        # a traceback to the user.
        pass
| [
"francolmenar@outlook.es"
] | francolmenar@outlook.es | |
72848852e83be523f39f31f32ac0dcfc34edae11 | d7b4e2e391e1f15fd7cb4fbf4d9aee598131b007 | /AE_Datasets/R_A/datasets/CWRUFFT.py | 254be0baee6bfeaa9abd1637485dccf4ee008772 | [
"MIT"
] | permissive | wuyou33/DL-based-Intelligent-Diagnosis-Benchmark | eba2ce6f948b5abe68069e749f64501a32e1d7ca | e534f925cf454d07352f7ef82d75a8d6dac5355c | refs/heads/master | 2021-01-02T15:06:29.041349 | 2019-12-28T21:47:21 | 2019-12-28T21:47:21 | 239,673,952 | 1 | 0 | MIT | 2020-02-11T04:15:21 | 2020-02-11T04:15:20 | null | UTF-8 | Python | false | false | 5,624 | py | import os
import torch
import numpy as np
import pandas as pd
from scipy.io import loadmat
from sklearn.model_selection import train_test_split
from datasets.SequenceDatasets import dataset
from datasets.sequence_aug import *
from tqdm import tqdm
#Digital data was collected at 12,000 samples per second
signal_size=1024
datasetname = ["12k Drive End Bearing Fault Data", "12k Fan End Bearing Fault Data", "48k Drive End Bearing Fault Data",
"Normal Baseline Data"]
normalname = ["97.mat", "98.mat", "99.mat", "100.mat"]
# For 12k Drive End Bearing Fault Data
dataname1 = ["105.mat", "118.mat", "130.mat", "169.mat", "185.mat", "197.mat", "209.mat", "222.mat",
"234.mat"] # 1797rpm
dataname2 = ["106.mat", "119.mat", "131.mat", "170.mat", "186.mat", "198.mat", "210.mat", "223.mat",
"235.mat"] # 1772rpm
dataname3 = ["107.mat", "120.mat", "132.mat", "171.mat", "187.mat", "199.mat", "211.mat", "224.mat",
"236.mat"] # 1750rpm
dataname4 = ["108.mat", "121.mat", "133.mat", "172.mat", "188.mat", "200.mat", "212.mat", "225.mat",
"237.mat"] # 1730rpm
# For 12k Fan End Bearing Fault Data
dataname5 = ["278.mat", "282.mat", "294.mat", "274.mat", "286.mat", "310.mat", "270.mat", "290.mat",
"315.mat"] # 1797rpm
dataname6 = ["279.mat", "283.mat", "295.mat", "275.mat", "287.mat", "309.mat", "271.mat", "291.mat",
"316.mat"] # 1772rpm
dataname7 = ["280.mat", "284.mat", "296.mat", "276.mat", "288.mat", "311.mat", "272.mat", "292.mat",
"317.mat"] # 1750rpm
dataname8 = ["281.mat", "285.mat", "297.mat", "277.mat", "289.mat", "312.mat", "273.mat", "293.mat",
"318.mat"] # 1730rpm
# For 48k Drive End Bearing Fault Data
dataname9 = ["109.mat", "122.mat", "135.mat", "174.mat", "189.mat", "201.mat", "213.mat", "250.mat",
"262.mat"] # 1797rpm
dataname10 = ["110.mat", "123.mat", "136.mat", "175.mat", "190.mat", "202.mat", "214.mat", "251.mat",
"263.mat"] # 1772rpm
dataname11 = ["111.mat", "124.mat", "137.mat", "176.mat", "191.mat", "203.mat", "215.mat", "252.mat",
"264.mat"] # 1750rpm
dataname12 = ["112.mat", "125.mat", "138.mat", "177.mat", "192.mat", "204.mat", "217.mat", "253.mat",
"265.mat"] # 1730rpm
# label
label = [1, 2, 3, 4, 5, 6, 7, 8, 9] # The failure data is labeled 1-9
axis = ["_DE_time", "_FE_time", "_BA_time"]
# generate Training Dataset and Testing Dataset
def get_files(root, test=False):
    '''
    This function is used to generate the final training set and test set.
    root:The location of the data set
    normalname:List of normal data
    dataname:List of failure data
    test: accepted for interface symmetry with other loaders; unused here.
    Returns [data, lab]: parallel lists of FFT sample arrays and int labels.
    '''
    # NOTE(review): data_root1/2 are already absolute after the first join, so
    # the extra '/tmp' in the path1/path2 joins below is discarded (os.path.join
    # drops components before an absolute one) -- confirm intended.
    data_root1 = os.path.join('/tmp', root, datasetname[3])
    data_root2 = os.path.join('/tmp', root, datasetname[0])

    path1 = os.path.join('/tmp', data_root1, normalname[0])  # 0->1797rpm ;1->1772rpm;2->1750rpm;3->1730rpm
    data, lab = data_load(path1, axisname=normalname[0],label=0)  # The label for normal data is 0

    # Append samples from each 1797rpm drive-end fault file, labelled 1-9
    # via the module-level `label` list.
    for i in tqdm(range(len(dataname1))):
        path2 = os.path.join('/tmp', data_root2, dataname1[i])
        data1, lab1 = data_load(path2, dataname1[i], label=label[i])
        data += data1
        lab += lab1

    return [data, lab]
def data_load(filename, axisname, label):
    '''
    Load one vibration channel from a .mat file and slice it into
    fixed-length, single-sided FFT magnitude samples.

    filename: path of the .mat data file
    axisname: base file name (e.g. "105.mat"); its numeric part selects the
              variable inside the .mat file, combined with the channel
              suffix ---->"_DE_time","_FE_time","_BA_time"
    label:    integer class label attached to every generated sample

    Returns (data, lab): a list of (signal_size/2, 1) arrays and a parallel
    list of labels.
    '''
    datanumber = axisname.split(".")
    # Variable names inside the .mat files are zero-padded to three digits
    # (e.g. "X097_DE_time" vs "X105_DE_time").
    # FIX: int() instead of eval() -- same numeric result, without executing
    # arbitrary text taken from a file name.
    if int(datanumber[0]) < 100:
        realaxis = "X0" + datanumber[0] + axis[0]
    else:
        realaxis = "X" + datanumber[0] + axis[0]
    fl = loadmat(filename)[realaxis]
    fl = fl.reshape(-1,)
    data = []
    lab = []
    # Non-overlapping windows of signal_size points; a trailing remainder
    # shorter than signal_size is dropped.
    start, end = 0, signal_size
    while end <= fl.shape[0]:
        x = fl[start:end]
        x = np.fft.fft(x)
        # Normalized magnitude spectrum; keep only the first half because
        # the FFT of a real signal is conjugate-symmetric.
        x = np.abs(x) / len(x)
        x = x[range(int(x.shape[0] / 2))]
        x = x.reshape(-1, 1)
        data.append(x)
        lab.append(label)
        start += signal_size
        end += signal_size

    return data, lab
def data_transforms(dataset_type="train", normlize_type="-1-1"):
    """Return the transform pipeline for the given split.

    dataset_type: 'train' (normalize + random augmentations) or 'val'
        (normalize only); any other key raises KeyError.
    normlize_type: normalization mode string forwarded to Normalize.
    """
    transforms = {
        'train': Compose([
            Reshape(),
            Normalize(normlize_type),
            # Random* ops are data augmentation, applied to training only.
            RandomAddGaussian(),
            RandomScale(),
            RandomStretch(),
            RandomCrop(),
            Retype()
        ]),
        'val': Compose([
            Reshape(),
            Normalize(normlize_type),
            Retype()
        ])
    }
    return transforms[dataset_type]
class CWRUFFT(object):
    """CWRU bearing dataset, frequency-domain (FFT) variant.

    Wraps the samples produced by get_files()/data_load() above as train/val
    (or test) dataset objects.
    """
    num_classes = 10    # 1 normal class (label 0) + 9 fault classes (labels 1-9)
    inputchannel = 1    # single vibration channel per sample
    def __init__(self, data_dir,normlizetype):
        # data_dir: dataset root passed to get_files();
        # normlizetype: normalization mode forwarded to data_transforms().
        self.data_dir = data_dir
        self.normlizetype = normlizetype
    def data_preprare(self, test=False):
        # (sic: "preprare") Load every sample, then either wrap it all as one
        # un-augmented test dataset, or as a stratified 80/20 train/val split.
        list_data = get_files(self.data_dir, test)
        if test:
            test_dataset = dataset(list_data=list_data, test=True, transform=None)
            return test_dataset
        else:
            data_pd = pd.DataFrame({"data": list_data[0], "label": list_data[1]})
            # Fixed random_state keeps the split reproducible across runs.
            train_pd, val_pd = train_test_split(data_pd, test_size=0.2, random_state=40, stratify=data_pd["label"])
            train_dataset = dataset(list_data=train_pd, transform=data_transforms('train',self.normlizetype))
            val_dataset = dataset(list_data=val_pd, transform=data_transforms('val',self.normlizetype))
            return train_dataset, val_dataset
| [
"646032073@qq.com"
] | 646032073@qq.com |
0c1c7c59a90d3a47d0f88a3cbbd025f0cc467d3a | 74482894c61156c13902044b4d39917df8ed9551 | /cryptoapis/model/validate_address_request_body_data_item.py | 734ae14dc2265ee6ebf43f95b063cbf521ca71fa | [
"MIT"
] | permissive | xan187/Crypto_APIs_2.0_SDK_Python | bb8898556ba014cc7a4dd31b10e24bec23b74a19 | a56c75df54ef037b39be1315ed6e54de35bed55b | refs/heads/main | 2023-06-22T15:45:08.273635 | 2021-07-21T03:41:05 | 2021-07-21T03:41:05 | 387,982,780 | 1 | 0 | NOASSERTION | 2021-07-21T03:35:29 | 2021-07-21T03:35:29 | null | UTF-8 | Python | false | false | 7,042 | py | """
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: developers@cryptoapis.io
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from cryptoapis.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class ValidateAddressRequestBodyDataItem(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """
    # This model declares no enum-restricted attributes.
    allowed_values = {
    }
    # This model declares no length/range/regex constraints.
    validations = {
    }
    # None means undeclared properties carry no accepted value types here.
    additional_properties_type = None
    _nullable = False
    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            'address': (str,),  # noqa: E501
        }
    @cached_property
    def discriminator():
        # Not a polymorphic schema: no discriminator field is used.
        return None
    # Python attribute name -> JSON key on the wire.
    attribute_map = {
        'address': 'address',  # noqa: E501
    }
    _composed_schemas = {}
    # Internal bookkeeping attributes that must never be treated as model data.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])
    @convert_js_args_to_python_args
    def __init__(self, address, *args, **kwargs):  # noqa: E501
        """ValidateAddressRequestBodyDataItem - a model defined in OpenAPI
        Args:
            address (str): Represents the specific address that will be checked if it's valid or not.
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                will be type checked and a TypeError will be
                raised if the wrong type is input.
                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                drill down to the model in received_data
                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                are serialized names, as specified in the OpenAPI document.
                False if the variable names in the input data
                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter.
                If passed, type conversion is attempted
                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                classes that we have traveled through so that
                if we see that class again we will not use its
                discriminator again.
                When traveling through a discriminator, the
                composed schema that is
                is traveled through is added to this set.
                For example if Animal has a discriminator
                petType and we pass in "Dog", and the class Dog
                allOf includes Animal, we move through Animal
                once using the discriminator, and pick Dog.
                Then in Dog, we will make an instance of the
                Animal class but this time we won't travel
                through its discriminator because we passed in
                _visited_composed_classes = (Animal,)
        """
        # Strip framework-control keywords before the remaining kwargs are
        # interpreted as model attributes.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        # Extra positional arguments are never valid for generated models.
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        # Record this class to avoid re-walking its discriminator later.
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.address = address
        # Remaining kwargs become model attributes; unknown keys may be
        # silently discarded depending on the active configuration.
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
| [
"kristiyan.ivanov@menasoftware.com"
] | kristiyan.ivanov@menasoftware.com |
80faed9b37a683651a800576bdc93c0757087338 | 6b19ed8845f7cb020ad49da57a0c0fe85314a274 | /zerver/migrations/0170_submessage.py | 751932249a3011674f642f189788660cb10396ba | [
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] | permissive | jahau/zulip | eb4da13858892065591caced88fc9a086fa0e0d2 | 51a8873579b9d4bb95219cd4a5c859fa972fa06b | refs/heads/master | 2021-05-18T03:44:32.003307 | 2020-03-27T22:29:55 | 2020-03-28T19:04:36 | 251,087,399 | 1 | 0 | Apache-2.0 | 2020-03-29T17:11:42 | 2020-03-29T17:11:42 | null | UTF-8 | Python | false | false | 931 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-01-26 21:54
from __future__ import unicode_literals
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the SubMessage model: typed text content attached to a Message
    and attributed to a sending user."""
    # Builds on the previous zerver schema revision.
    dependencies = [
        ('zerver', '0169_stream_is_announcement_only'),
    ]
    operations = [
        migrations.CreateModel(
            name='SubMessage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('msg_type', models.TextField()),
                ('content', models.TextField()),
                # Deleting the parent message or the sender cascades to its submessages.
                ('message', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='zerver.Message')),
                ('sender', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"tabbott@zulipchat.com"
] | tabbott@zulipchat.com |
a9cbb2646e980b7d6c69201039cc77287c106129 | a59dcdb8e8b963a5082d4244889b82a4379510f6 | /bemani/tests/test_CardCipher.py | b290ec9ee4bbd90cdec566f7689dae3218fe85c2 | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain"
] | permissive | vangar/bemaniutils | 9fdda035eb29e5e0b874d475fdbfdc8b99aeb003 | 284153ef2e2e40014656fbfeb254c05f3a8c61e8 | refs/heads/trunk | 2023-04-04T04:00:21.088088 | 2023-02-17T03:32:27 | 2023-02-17T03:40:07 | 332,388,243 | 1 | 0 | null | 2021-01-24T07:07:34 | 2021-01-24T07:07:33 | null | UTF-8 | Python | false | false | 1,596 | py | # vim: set fileencoding=utf-8
import unittest
from bemani.common import CardCipher
class TestCardCipher(unittest.TestCase):
    """Known-answer tests for the card ID cipher."""

    def test_internal_cipher(self) -> None:
        # (plaintext bytes, expected ciphertext bytes) known-answer pairs.
        vectors = [
            (
                [0x68, 0xFC, 0xA5, 0x27, 0x00, 0x01, 0x04, 0xE0],
                [0xC7, 0xD0, 0xB3, 0x85, 0xAD, 0x1F, 0xD9, 0x49],
            ),
            (
                [0x2C, 0x10, 0xA6, 0x27, 0x00, 0x01, 0x04, 0xE0],
                [0x33, 0xC6, 0xE6, 0x2E, 0x6E, 0x33, 0x38, 0x74],
            ),
        ]
        for plain_values, cipher_values in vectors:
            inp = bytes(plain_values)
            out = bytes(cipher_values)
            # Each pair must round-trip through the low-level primitives.
            encoded = CardCipher._encode(inp)
            self.assertEqual(
                encoded, out, f"Card encode {encoded!r} doesn't match expected {out!r}"
            )
            decoded = CardCipher._decode(out)
            self.assertEqual(
                decoded, inp, f"Card decode {decoded!r} doesn't match expected {inp!r}"
            )

    def test_external_cipher(self) -> None:
        # (card-back string, database representation) round-trip pairs.
        known_cards = [
            ("S6E523E30ZK7ML1P", "E004010027A5FC68"),
            ("78B592HZSM9E6712", "E004010027A6102C"),
        ]
        for back, db in known_cards:
            decoded = CardCipher.decode(back)
            self.assertEqual(
                decoded, db, f"Card DB {decoded} doesn't match expected {db}"
            )
            encoded = CardCipher.encode(db)
            self.assertEqual(
                encoded, back, f"Card back {encoded} doesn't match expected {back}"
            )
| [
"dragonminded@dragonminded.com"
] | dragonminded@dragonminded.com |
154cc575312b41fe86166cc220dbc83fdd0a36cd | 444a9480bce2035565332d4d4654244c0b5cd47b | /research/cv/fishnet99/src/config.py | 09542e1e0df894872cd2291de2ec208e8be86557 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] | permissive | mindspore-ai/models | 7ede9c6454e77e995e674628204e1c6e76bd7b27 | eab643f51336dbf7d711f02d27e6516e5affee59 | refs/heads/master | 2023-07-20T01:49:34.614616 | 2023-07-17T11:43:18 | 2023-07-17T11:43:18 | 417,393,380 | 301 | 92 | Apache-2.0 | 2023-05-17T11:22:28 | 2021-10-15T06:38:37 | Python | UTF-8 | Python | false | false | 1,681 | py | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""from googlenet"""
from easydict import EasyDict as edict
# Training/evaluation hyper-parameters for the ImageNet run.  The inline
# device notes record alternative values tuned for other accelerator setups.
imagenet_cfg = edict({
    'name': 'imagenet',
    'pre_trained': False,
    'num_classes': 1000,
    'lr_init': 0.05,  # Ascend_1P: 0.05, Ascend_8P: 0.4, GPU_1P: 0.05, GPU_2P: 0.1
    'batch_size': 128,
    'epoch_size': 160,  # GPU_2P: 110
    'momentum': 0.9,
    'weight_decay': 1e-4,
    'image_height': 224,
    'image_width': 224,
    # Dataset locations (ImageNet/ILSVRC2012 train and validation splits).
    'data_path': '/data/ILSVRC2012_train/',
    'val_data_path': '/data/ILSVRC2012_val/',
    'device_target': 'Ascend',
    'device_id': 0,
    'keep_checkpoint_max': 30,
    'checkpoint_path': None,
    # Export filenames (model name: fishnet99).
    'onnx_filename': 'fishnet99',
    'air_filename': 'fishnet99',
    # optimizer and lr related
    'lr_scheduler': 'cosine_annealing',
    'lr_epochs': [30, 60, 90, 120],
    'lr_gamma': 0.3,
    'eta_min': 0.0,
    'T_max': 150,  # GPU_2P: 100
    'warmup_epochs': 0,
    # loss related
    'is_dynamic_loss_scale': 0,
    'loss_scale': 1024,
    'label_smooth_factor': 0.1,
    'use_label_smooth': True,
})
| [
"chenhaozhe1@huawei.com"
] | chenhaozhe1@huawei.com |
15d9fcafc6d3e710f817f1f63f034500a2afd560 | f4bc045760aa9017ff08dfabd7eb8dd2134e9da8 | /src/security/errorHandlers.py | e38e1dcbbad4081d13de896d04afde638fd374be | [
"MIT"
] | permissive | nagasudhirpulla/wrldc_mis_flask_ui | 196aab28eb1120e682f2d58c01b1261236da2145 | fc438a01ba29200591f9f1ae53fab3d716169ffd | refs/heads/master | 2023-03-10T22:31:07.317901 | 2021-02-25T05:44:55 | 2021-02-25T05:44:55 | 291,735,397 | 0 | 3 | MIT | 2020-12-07T06:16:39 | 2020-08-31T14:19:26 | Python | UTF-8 | Python | false | false | 674 | py | from flask import render_template
def page_forbidden(err):
    """403 handler: rendered when the content requires a logged-in user."""
    body = render_template(
        'message.html.j2',
        title='403 Forbidden',
        message='You must be logged in to access this content.',
    )
    return body, 403
def page_unauthorized(err):
    """401 handler: rendered when the request lacks valid authorization.

    Returns a (body, status) tuple as expected by Flask error handlers.
    """
    # Fixed user-facing grammar: was "You must be authorized in to access".
    return render_template('message.html.j2', title='401 Unauthorized',
                           message='You must be authorized to access this content.'), 401
def page_not_found(err):
    """404 handler: rendered when no route matches the requested URL."""
    body = render_template(
        'message.html.j2',
        title='404 Not Found',
        message='The requested URL was not found on the server. If you entered the URL manually please check your spelling and try again.',
    )
    return body, 404
| [
"nagasudhirpulla@gmail.com"
] | nagasudhirpulla@gmail.com |
3b362a09495427acbe39d45d7eb9a5175fe36c3b | 83179c14ae81a2ed0733812195747340c9ef0555 | /Takahashi_Unevoleved.py | 9b54d332f59c6ad8eaf04e41b44dac034d700298 | [] | no_license | susami-jpg/atcoder_solved_probrem | 20f90ba2e3238c6857e370ed04a8407271ccc36f | 741a4acd79f637d6794c4dbcc2cad1c601b749fc | refs/heads/master | 2023-07-21T12:38:27.460309 | 2021-08-29T10:26:31 | 2021-08-29T10:26:31 | 375,561,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 940 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Jun 19 00:37:30 2021
@author: kazuk
"""
# Problem parameters from stdin: start value x, limit y, multiplier a,
# additive step b (consumed by is_ok below).
x, y, a, b = map(int, input().split())
def is_ok(t):
    """Return True iff t growth steps keep the value strictly below y.

    Starting from x, each step either multiplies by a or adds b.  A
    multiplication is only taken while it is cheaper than adding b, so
    multiplications are applied greedily first and every remaining step
    adds b.
    """
    strength = x
    multiplied = 0
    while multiplied < t and strength * a < b:
        strength *= a
        multiplied += 1
    strength += (t - multiplied) * b
    return strength < y
def meguru_bisect(ng, ok):
    """Boundary binary search ("meguru-style" bisect) over is_ok.

    ng and ok are sentinels just outside the answer range: ng is a value
    for which is_ok fails, ok one for which it holds (swap the two when
    the feasible side lies on the other end).  The interval is halved
    until the sentinels are adjacent; the feasible boundary is returned.
    """
    while abs(ok - ng) > 1:
        middle = (ok + ng) // 2
        if is_ok(middle):
            ok = middle
        else:
            ng = middle
    return ok
# Search between ok=0 (assumed feasible) and ng=y (assumed infeasible
# upper sentinel -- TODO confirm against the problem bounds) for the
# largest t with is_ok(t) true.
ans = meguru_bisect(y, 0)
print(ans)
| [
"kazuki_susami@icloud.com"
] | kazuki_susami@icloud.com |
91f123874a66cec1442164b3cc12adf9525b11a9 | 2fa102b20ea99d796cc3677c9305f1a80be18e6b | /cf_977_a.py | 6432622d333ee0d5453c12b811960b283a09f017 | [] | no_license | pronob1010/Codeforces_Solve | e5186b2379230790459328964d291f6b40a4bb07 | 457b92879a04f30aa0003626ead865b0583edeb2 | refs/heads/master | 2023-03-12T11:38:31.114189 | 2021-03-03T05:49:17 | 2021-03-03T05:49:17 | 302,124,730 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | a,b = list(map(int, input().split()))
for i in range(b):
c = a%10
if c == 0:
a //= 10
else:
a = a - 1
print(a) | [
"pronobmozumder.info@gmail.com"
] | pronobmozumder.info@gmail.com |
f55908dbef33fc48ad2e595c0ff179d825e7d76f | 3ca1a812ab4e7278e88a9bc6a183e341803cb9ad | /2022/06/main.py | cbf8d1ad542bad6d1a84954a42298b0f10a5285a | [] | no_license | acarlson99/advent-of-code | 11608a436e3fd4eadc8a3960811b6e495859d024 | be7c0cf337a5c70be12df6812cbddb48d7207495 | refs/heads/master | 2023-05-25T14:57:28.883802 | 2023-05-20T01:36:48 | 2023-05-20T01:36:48 | 225,130,286 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 522 | py | #!/usr/bin/env python3
import fileinput
def find_marker(stream, width):
    """Return (index, window) for the first run of `width` distinct chars.

    `index` is the one-past-the-end position of the window (the puzzle
    answer); (None, None) is returned when no such window exists.
    """
    # The range must end at len(stream) + 1 so the final window -- the one
    # ending exactly at the end of the stream -- is examined too; the
    # previous range(width, len(stream)) skipped it (off-by-one).
    for i in range(width, len(stream) + 1):
        window = stream[i - width:i]
        if len(set(window)) == width:
            return i, window
    return None, None


if __name__ == '__main__':
    lines = [line.strip() for line in fileinput.input()]
    s = lines[0]
    print(s)
    # Part 1 (width 4) and part 2 (width 14) share the same scan.
    for width in (4, 14):
        i, window = find_marker(s, width)
        if window is not None:
            print(window)
            print(set(window))
            print(i)
| [
"a@a.a"
] | a@a.a |
7870dadc7fcf4f57d56e72d69ef936c12785c0d2 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02725/s087554992.py | 731dabb171585e41f2a3d2db76f359d80c174b9b | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | import sys
from array import array
read = sys.stdin.buffer.read
k, n, *A = map(int, read().split())
A += [k + A[0]]
far = max(array("l", [x - y for x, y in zip(A[1:], A)]))
print(k-far) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
e445505e34d8a75cf9f93fe8268057531f532fa4 | 03e3138f99f275d15d41a5c5bfb212f85d64d02e | /source/res/scripts/common/Lib/ctypes/test/test_errcheck.py | e5cdb39ddec7c316011427cc60216299774d573d | [] | no_license | TrenSeP/WorldOfTanks-Decompiled | e428728e7901146d0b599d02c930d70532232a97 | 1faa748acec1b7e435b657fd054ecba23dd72778 | refs/heads/1.4.1 | 2020-04-27T08:07:49.813023 | 2019-03-05T17:37:06 | 2019-03-05T17:37:06 | 174,159,837 | 1 | 0 | null | 2019-03-06T14:33:33 | 2019-03-06T14:24:36 | Python | UTF-8 | Python | false | false | 153 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/common/Lib/ctypes/test/test_errcheck.py
import sys
from ctypes import *
| [
"StranikS_Scan@mail.ru"
] | StranikS_Scan@mail.ru |
842aaeca36cb8fe937d897dc527eb11e4f3aafd3 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /Luhb2KStP2wiX6FMp_7.py | 03174c243680289a3989162ffdf71abc0d1a5d1b | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 259 | py | """
Create a function to return the amount of potatoes there are in a string.
### Examples
potatoes("potato") ➞ 1
potatoes("potatopotato") ➞ 2
potatoes("potatoapple") ➞ 1
### Notes
N/A
"""
def potatoes(p):
    """Return the number of (non-overlapping) "potato" occurrences in *p*.

    The previous lambda counted 'p' characters, which returns 3 for
    "potatoapple" where the spec's own example expects 1.
    """
    return p.count('potato')
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
f46d78197be08429fc60e17a7d504549e4bbc2ad | e9f9e38f935748ee043647452a2bfb949c30ff46 | /backend/event/migrations/0001_initial.py | d859b916059be0a1c063e392df533f02287ed429 | [] | no_license | crowdbotics-apps/test2-19679 | 19ff09828e3c4241bc48ff827cf22586b68718d5 | 788c724e16b87cbf21d980b1baf777f84751d388 | refs/heads/master | 2022-12-01T04:41:52.053674 | 2020-08-20T08:10:21 | 2020-08-20T08:10:21 | 288,945,795 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,766 | py | # Generated by Django 2.2.15 on 2020-08-20 08:09
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the event app: categories, FAQs, locations,
    vendors (plus details), sponsors, schedules with presenters, and the
    per-user MySchedule/Favorites join tables."""
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('description', models.TextField()),
                ('name', models.CharField(blank=True, max_length=256, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Faq',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=256)),
                ('description', models.TextField()),
            ],
        ),
        migrations.CreateModel(
            name='Location',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('amenities', models.TextField(blank=True, null=True)),
                ('name', models.CharField(blank=True, max_length=256, null=True)),
                ('image', models.SlugField(blank=True, null=True)),
            ],
        ),
        # Vendor references Category and Location, so it is created after them.
        migrations.CreateModel(
            name='Vendor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.TextField()),
                ('logo_image', models.SlugField(blank=True, null=True)),
                ('type', models.TextField(blank=True, null=True)),
                ('website', models.URLField(blank=True, null=True)),
                ('category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='vendor_category', to='event.Category')),
                ('location', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='vendor_location', to='event.Location')),
            ],
        ),
        migrations.CreateModel(
            name='VendorDetail',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('website', models.URLField()),
                ('description', models.TextField()),
                ('associated_name', models.TextField(blank=True, null=True)),
                ('vendor_id', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='vendordetail_vendor_id', to='event.Vendor')),
            ],
        ),
        migrations.CreateModel(
            name='Sponsor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.TextField()),
                ('logo_image', models.SlugField()),
                ('sponsor_level', models.TextField()),
                ('presenter', models.BooleanField()),
                ('website', models.URLField(blank=True, null=True)),
                ('location', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='sponsor_location', to='event.Location')),
            ],
        ),
        migrations.CreateModel(
            name='Schedule',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('dateTime', models.DateTimeField()),
                ('description', models.TextField(blank=True, null=True)),
                ('track', models.TextField(blank=True, null=True)),
                ('location', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='schedule_location', to='event.Location')),
            ],
        ),
        migrations.CreateModel(
            name='Presenter',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=256)),
                ('title', models.CharField(max_length=256)),
                ('schedule', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='presenter_schedule', to='event.Schedule')),
            ],
        ),
        # Join table: which schedule entries a user has saved.
        migrations.CreateModel(
            name='MySchedule',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('schedule', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='myschedule_schedule', to='event.Schedule')),
                ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='myschedule_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Join table: which vendors a user has favorited.
        migrations.CreateModel(
            name='Favorites',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='favorites_user', to=settings.AUTH_USER_MODEL)),
                ('vendor', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='favorites_vendor', to='event.Vendor')),
            ],
        ),
    ]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
b5555f4a7a53a167eaca76b9c45cdc9e564dcdde | ee880f62a8ffc1b8544695d3bc1f4bcf809965ab | /load.py | a992777614213450331b4e97154455b4acf0eff7 | [] | no_license | kosyachniy/twianalysis | 1ba3ba6319cbeedf4f19e83ff31f01ced8b26e54 | 514a0ebb7829a4abb340d499e1151b5c55f26f80 | refs/heads/master | 2021-01-01T16:01:29.477061 | 2017-07-29T18:31:25 | 2017-07-29T18:31:25 | 97,756,836 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 565 | py | import requests, time, json
from bs4 import BeautifulSoup
url='http://mfd.ru/news/company/view/?id=3&page='
with open('db.txt', 'w') as file:
for s in range(893):
query=str(s)
page = requests.get(url + query).text
soup = BeautifulSoup(page, 'lxml')
table = soup.find('table', id='issuerNewsList')
tr=table.find_all('tr')
for i in tr:
td=i.find_all('td')
date=td[0].contents[0].strip()
name=td[1].a.contents[0].strip()
print(date, name)
a=json.dumps({'date':date, 'name':name}, ensure_ascii=False)
print(a, file=file)
time.sleep(1) | [
"polozhev@mail.ru"
] | polozhev@mail.ru |
c9f649966413184912b0a18b36262eb636f1c1bc | 8dcd3ee098b4f5b80879c37a62292f42f6b2ae17 | /venv/Lib/site-packages/win32/test/test_win32rcparser.py | 02510974cd502991311d8e0bd3161538cefaf9e1 | [] | no_license | GregVargas1999/InfinityAreaInfo | 53fdfefc11c4af8f5d2b8f511f7461d11a3f7533 | 2e4a7c6a2424514ca0ec58c9153eb08dc8e09a4a | refs/heads/master | 2022-12-01T20:26:05.388878 | 2020-08-11T18:37:05 | 2020-08-11T18:37:05 | 286,821,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,485 | py | import os
import sys
import tempfile
import unittest
import win32con
import win32rcparser
class TestParser(unittest.TestCase):
def setUp(self):
rc_file = os.path.join(os.path.dirname(__file__), "win32rcparser", "test.rc")
self.resources = win32rcparser.Parse(rc_file)
def testStrings(self):
for sid, expected in [
("IDS_TEST_STRING4", "Test 'single quoted' string"),
("IDS_TEST_STRING1", 'Test "quoted" string'),
("IDS_TEST_STRING3", 'String with single " quote'),
("IDS_TEST_STRING2", 'Test string'),
]:
got = self.resources.stringTable[sid].value
self.assertEqual(got, expected)
def testStandardIds(self):
for idc in "IDOK IDCANCEL".split():
correct = getattr(win32con, idc)
self.assertEqual(self.resources.names[correct], idc)
self.assertEqual(self.resources.ids[idc], correct)
def testTabStop(self):
d = self.resources.dialogs["IDD_TEST_DIALOG2"]
tabstop_names = ["IDC_EDIT1", "IDOK"] # should have WS_TABSTOP
tabstop_ids = [self.resources.ids[name] for name in tabstop_names]
notabstop_names = ["IDC_EDIT2"] # should have WS_TABSTOP
notabstop_ids = [self.resources.ids[name] for name in notabstop_names]
num_ok = 0
for cdef in d[1:]: # skip dlgdef
# print cdef
cid = cdef[2]
style = cdef[-2]
styleex = cdef[-1]
if cid in tabstop_ids:
self.failUnlessEqual(style & win32con.WS_TABSTOP, win32con.WS_TABSTOP)
num_ok += 1
elif cid in notabstop_ids:
self.failUnlessEqual(style & win32con.WS_TABSTOP, 0)
num_ok += 1
self.failUnlessEqual(num_ok, len(tabstop_ids) + len(notabstop_ids))
class TestGenerated(TestParser):
def setUp(self):
# don't call base!
rc_file = os.path.join(os.path.dirname(__file__), "win32rcparser", "test.rc")
py_file = tempfile.mktemp('test_win32rcparser.py')
try:
win32rcparser.GenerateFrozenResource(rc_file, py_file)
py_source = open(py_file).read()
finally:
if os.path.isfile(py_file):
os.unlink(py_file)
# poor-man's import :)
globs = {}
exec(py_source, globs, globs)
self.resources = globs["FakeParser"]()
if __name__ == '__main__':
unittest.main()
| [
"44142880+GregVargas1999@users.noreply.github.com"
] | 44142880+GregVargas1999@users.noreply.github.com |
cdff15de0b8ec7264a8c9b69e2d3a96475f6c8de | 187a6558f3c7cb6234164677a2bda2e73c26eaaf | /jdcloud_sdk/services/mongodb/apis/DescribeSecurityIpsRequest.py | b2c3816b3457164790bb60201ef3ace536c5987e | [
"Apache-2.0"
] | permissive | jdcloud-api/jdcloud-sdk-python | 4d2db584acc2620b7a866af82d21658cdd7cc227 | 3d1c50ed9117304d3b77a21babe899f939ae91cd | refs/heads/master | 2023-09-04T02:51:08.335168 | 2023-08-30T12:00:25 | 2023-08-30T12:00:25 | 126,276,169 | 18 | 36 | Apache-2.0 | 2023-09-07T06:54:49 | 2018-03-22T03:47:02 | Python | UTF-8 | Python | false | false | 1,347 | py | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class DescribeSecurityIpsRequest(JDCloudRequest):
"""
查询实例访问白名单
"""
def __init__(self, parameters, header=None, version="v1"):
super(DescribeSecurityIpsRequest, self).__init__(
'/regions/{regionId}/instances/{instanceId}/securityIps', 'GET', header, version)
self.parameters = parameters
class DescribeSecurityIpsParameters(object):
def __init__(self, regionId, instanceId, ):
"""
:param regionId: Region ID
:param instanceId: Instance ID
"""
self.regionId = regionId
self.instanceId = instanceId
| [
"oulinbao@jd.com"
] | oulinbao@jd.com |
9b3a33eba56dd2a09d23dcb118330a360473d9ab | bfc25f1ad7bfe061b57cfab82aba9d0af1453491 | /data/external/repositories_2to3/93704/kaggle-allstate-purchase-master/pre_parse.py | 2e32dced90c7215c9210ca3e6123bde503f8189e | [
"MIT"
] | permissive | Keesiu/meta-kaggle | 77d134620ebce530d183467202cf45639d9c6ff2 | 87de739aba2399fd31072ee81b391f9b7a63f540 | refs/heads/master | 2020-03-28T00:23:10.584151 | 2018-12-20T19:09:50 | 2018-12-20T19:09:50 | 147,406,338 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,057 | py | """
Project: http://www.kaggle.com/c/allstate-purchase-prediction-challenge
Ranking: 9th from 1571 teams
Work Period: 12-may-2014 to 19-may-2014
Author: Euclides Fernandes Filho
email: euclides5414@gmail.com
"""
import numpy as np
import pandas as pd
from os import path
import conv
from time import sleep, time
from imports import *
ROOT = "./"
PRE_TRAIN_FILE = ROOT + 'data/train.csv'
PRE_TEST_FILE = ROOT + 'data/test_v2.csv'
TRAIN_FILE = ROOT + 'data/train_P.csv'
TEST_FILE = ROOT + 'data/test_v2_P.csv'
def pre_parse():
convs = {'car_value':conv.conv_car_value, 'state':conv.conv_state, 'C_previous':conv.conv_C_previous, 'duration_previous':conv.conv_duration_previous, 'time':conv.conv_time}
if not path.exists(TRAIN_FILE):
train = pd.read_csv(PRE_TRAIN_FILE, converters=convs)
train = do_risk(train)
train.to_csv(TRAIN_FILE, sep=',',na_rep="NA")
else:
train = pd.read_csv(TRAIN_FILE)
if not path.exists(TEST_FILE):
if path.exists(TEST_FILE + ".tmp"):
test = pd.read_csv(TEST_FILE + ".tmp")
else:
test = pd.read_csv(PRE_TEST_FILE, converters=convs)
test = do_risk(test)
# save a tmp file for safety in the case of a further error
test.to_csv(TEST_FILE + ".tmp", sep=',',na_rep="NA")
cols = list(test.columns.values)
print(cols)
for c in cols:
if c.startswith('Unnamed'):
test = test.drop(c,1)
print(c, "droped")
#some test location NAs
imp = Imputer(strategy='median',axis=0)
for state in np.unique(test.state):
v = test[test['state']==state].values
# sklearn bug version 0.14.1 - need to stack a dummy column before median imputation
# see http://stackoverflow.com/questions/23742005/scikit-learn-imputer-class-possible-bug-with-median-strategy
z = np.zeros(len(v))
z = z.reshape((len(z),1))
v = np.hstack((z,v))
v = imp.fit_transform(v)
test[test['state']==state] = v
test.to_csv(TEST_FILE, sep=',',na_rep="NA")
else:
test = pd.read_csv(TEST_FILE)
print(train.shape, test.shape)
return train, test
def do_risk(dt):
state, old_state = "FL",""
age_youngest = 75
age_oldest = 0
print("You'd better off drink a beer .... it will take a while .....")
sleep(2)
t0 = time()
for i in range(dt.shape[0]):
risk_factor = dt['risk_factor'][i]
if np.isnan(risk_factor):
state, age_oldest, age_youngest = dt['state'][i], dt['age_oldest'][i],dt['age_youngest'][i]
if state != old_state:
q_state = dt[(dt['state']==state) & (~np.isnan(dt['risk_factor']))]
old_state = state
q = q_state[((q_state['age_youngest']==age_youngest) & (q_state['age_oldest']==age_oldest))]
if len(q) > 0:
v = q['risk_factor'].median()
if np.isnan(v):
print(i,"ISNAN")
print(q)
dt['risk_factor'][i] = v
else:
for l,off in enumerate([1,2,3,4]):
q = q_state[((q_state['age_youngest']>=(age_youngest - off)) & (q_state['age_youngest'] <=(age_youngest + off)))\
& ((q_state['age_oldest']>=(age_oldest - off)) & (q_state['age_oldest']<=(age_oldest + off)))]
if len(q) > 0:
dt['risk_factor'][i] = q['risk_factor'].median()
print(i,":::LEVEL %i::::" % (l+1), len(q_state), len(q), state, age_youngest, age_oldest)
break
if len(q) == 0:
q = dt[((dt['age_youngest']>=(age_youngest - off)) & (dt['age_youngest'] <=(age_youngest + off)))\
& ((dt['age_oldest']>=(age_oldest - off)) & (dt['age_oldest']<=(age_oldest + off)))]
if len(q) > 0:
dt['risk_factor'][i] = q['risk_factor'].median()
print(i,":::LEVEL %i::::" % (l+2), len(q_state), len(q), state, age_youngest, age_oldest)
else:
if len(q) > 0:
q = dt[((dt['age_youngest']==age_youngest) & (dt['age_oldest']==age_oldest) & (~np.isnan(dt['risk_factor'])))]
print(i,":::LEVEL %i::::" % (l+3), len(q_state), len(q), state, age_youngest, age_oldest)
else:
print(i,":::FAILED::::", len(q_state), len(q), state, age_youngest, age_oldest)
break
print("risk NA done in %2.2f s" % (time() - t0))
print(dt.shape)
return dt
def main():
print(__doc__)
pre_parse()
if __name__ == '__main__':
main()
| [
"keesiu.wong@gmail.com"
] | keesiu.wong@gmail.com |
0f294442352392103ed94eb88fc76668f87af676 | 41acd1d7fcfba63d3b06b82d18d8a4d97dd40927 | /old/test_selenium.py | 9e85c7310eb625b00eacc66736b190a955884302 | [] | no_license | wancy86/learn_python | a33e3091b271840c8bf89cbbf991fe33b951a266 | 44e45a91361d6d46b9ab4a172af7e48e0f6df7dd | refs/heads/master | 2021-01-15T15:42:45.377381 | 2016-12-06T00:36:46 | 2016-12-06T00:36:46 | 55,651,391 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 606 | py | from selenium import webdriver
# 创建一个chrome实例
driver = webdriver.Chrome()
#这个是制定google浏览器,
#指定IE webdriver
#driver webdriver.Ie(),
#指定Firefox webdriver driver webdriver.Firefox()
# 到百度主页
driver.get("http://www.baidu.com")
# 定位到搜索输入框
inputElement = driver.find_element_by_xpath ("//input[@name='wd']")
# 输入查找内容
inputElement.send_keys("selenium python")
# 点击百度一下
submitElement.submit()
# 输出网页标题
print(driver.title)
#退出webdriver
driver.quit()
# 运行脚本会自动开启chrome自动开始测试 | [
"wancy86@sina.com"
] | wancy86@sina.com |
7280a8c038a404bc9a0f451fcc58c89c88927b29 | 5a8c7a330d6be1fcc90ee0ef298fcecfe204951b | /lectures/class_two/classes.py | 83a85c6c964abf2971c24b6e5a15be413b8f2201 | [] | no_license | EricSchles/nyu_python_class | 299448b55c03dcd90a8606de1df13f52982628eb | 2b19bf70f6b233e00fadfe7664ebd3b635e9df44 | refs/heads/master | 2021-01-25T05:45:11.997409 | 2017-05-17T15:40:00 | 2017-05-17T15:40:00 | 80,680,329 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,507 | py | import math
import statistics as st
class DescribeData:
def __init__(self, List): #stands for initialize
self.List = List
def describe(self):
print("Here are some statistics about our data")
print("---------------------------------------")
print("Our list has ",len(self.List),"many elements")
print("The mean is ",self.average()) #automatic type casting to string
print("The median is ",self.median())
if self.average() > self.median():
print("And the mean is ",abs(self.average()-self.median()),"greater than the median")
def average(self):
return st.mean(self.List)
def median(self):
return st.median(self.List)
def standard_deviation(self):
return st.stdev(self.List)
def describe(List):
ave = st.mean(List)
middle_number = st.median(List)
std_dev = st.stdev(List)
print("Here are some statistics about our data")
print("---------------------------------------")
print("Our list has ",len(List),"many elements")
print("The mean is ",ave) #automatic type casting to string
print("The median is ",middle_number)
if ave > middle_number:
print("And the mean is ",abs(ave-middle_number),"greater than the median")
if __name__ == '__main__':
    # Demo: summarise 200 random integers in [0, 100], then drop into an
    # interactive shell with the local names (List, describer, ...) bound.
    import random
    List = []
    for i in range(200):
        List.append(random.randint(0,100))
    describer = DescribeData(List)
    import code
    code.interact(local=locals())
| [
"ericschles@gmail.com"
] | ericschles@gmail.com |
2bd407814b0f7d9875e56a9543aa05be1470ed27 | c4c159a21d2f1ea0d7dfaa965aeff01c8ef70dce | /flask/flaskenv/Lib/site-packages/keras/applications/vgg16.py | 76c77c1f512a4b52134aed97846b7a911effe39e | [] | no_license | AhsonAslam/webapi | 54cf7466aac4685da1105f9fb84c686e38f92121 | 1b2bfa4614e7afdc57c9210b0674506ea70b20b5 | refs/heads/master | 2020-07-27T06:05:36.057953 | 2019-09-17T06:35:33 | 2019-09-17T06:35:33 | 208,895,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | version https://git-lfs.github.com/spec/v1
oid sha256:d16551dce42289cab53b58657d749a6e4322069372faf4930082d8bd2d661d1e
size 518
| [
"github@cuba12345"
] | github@cuba12345 |
00ebdf6d66acc5dca1a037cfbaf180fb57b81fa0 | ad5ad404d24f1ef195d069b2e9d36b1a22cfd25d | /libs/llvm-meta/clang-tools-extra/clang-tools-extra.py | 6e159d72f9efac1c8dc04b7f11484ed7b6b2585c | [
"BSD-2-Clause"
] | permissive | arruor/craft-blueprints-kde | 6643941c87afd09f20dd54635022d8ceab95e317 | e7e2bef76d8efbc9c4b84411aa1e1863ac8633c1 | refs/heads/master | 2020-03-22T17:54:38.445587 | 2018-07-10T11:47:21 | 2018-07-10T11:47:21 | 140,423,580 | 0 | 0 | null | 2018-07-10T11:43:08 | 2018-07-10T11:43:07 | null | UTF-8 | Python | false | false | 563 | py | # -*- coding: utf-8 -*-
import info
class subinfo(info.infoclass):
    """Craft blueprint metadata for the clang-tools-extra LLVM component."""
    def setTargets(self):
        # Only the package name and upstream git URL are supplied; the rest
        # of the version targets come from the infoclass defaults.
        self.versionInfo.setDefaultValues(packageName="clang-tools-extra", gitUrl="[git]https://git.llvm.org/git/clang-tools-extra.git")
    def setDependencies(self):
        # clang-tools-extra builds on the base virtual package and LLVM.
        self.runtimeDependencies["virtual/base"] = "default"
        self.runtimeDependencies["libs/llvm-meta/llvm"] = "default"
from Package.VirtualPackageBase import *
class Package(SourceComponentPackageBase):
    """Source-only package wrapper for this blueprint."""
    def __init__(self, **args):
        # NOTE(review): **args is accepted but neither used nor forwarded to
        # the base initialiser -- confirm this matches sibling blueprints.
        SourceComponentPackageBase.__init__(self)
| [
"vonreth@kde.org"
] | vonreth@kde.org |
bb9d04bc3c077bf44239e3310d88723efd751376 | d1ad7bfeb3f9e3724f91458277284f7d0fbe4b2d | /python/002-tcp-server/server.py | 496f0090ef0b35aa6ca74b4f0ea938f3a4f0629a | [] | no_license | qu4ku/tutorials | 01d2d5a3e8740477d896476d02497d729a833a2b | ced479c5f81c8aff0c4c89d2a572227824445a38 | refs/heads/master | 2023-03-10T20:21:50.590017 | 2023-03-04T21:57:08 | 2023-03-04T21:57:08 | 94,262,493 | 0 | 0 | null | 2023-01-04T21:37:16 | 2017-06-13T22:07:54 | PHP | UTF-8 | Python | false | false | 459 | py | import socket
# Minimal demo TCP server: greets every client with a fixed message.
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Bug fix: socket.gethostbyname() requires a hostname argument (the original
# call raised TypeError at startup); resolve this machine's own hostname so
# the server binds to its primary interface.
host = socket.gethostbyname(socket.gethostname())
port = 444
server_socket.bind((host, port))
server_socket.listen(3) # backlog: queue at most 3 pending connections
while True:
	client_socket, address = server_socket.accept()
	print(f'Received connection from {address}')
	message = 'Thank you for connectoin to the server\r\n'
	client_socket.send(message.encode('ascii'))
client_socket.close() | [
"qu4ku@hotmail.com"
] | qu4ku@hotmail.com |
3366f2acd4ae0124323c631d854eb5b0713dc2b2 | 7b8d505758cbadb002c9aa4449caf643836a829e | /tbkt/apps/task/urls.py | d2465afd980e2c934d98983da8ae0114cf218d0b | [] | no_license | GUAN-YE/myproject | 455a01dcc76629fc33d8154efcba9ef1af0faaa9 | 21e48a150dd6009a6bf2572b2d42eee20fbcbdfc | refs/heads/master | 2020-03-07T22:51:41.821356 | 2018-04-02T14:13:19 | 2018-04-02T14:13:19 | 127,765,750 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 294 | py | # coding: utf-8
from django.conf.urls import include, url, patterns
# Legacy routes kept for the old Chinese-subject release: the old endpoints
# are patched in place so their parameters and response payloads stay unchanged.
urlpatterns = patterns('apps.task.views',
                       (r"^sms$", "p_givesms"), # send the homework SMS
                       (r"^checkNum$", "p_check_num"), # number of assignments awaiting check
) | [
"15670549987@163.com"
] | 15670549987@163.com |
a396dee88c812cab4ef400af373230a30d9681b9 | 59a7f64d91b8466074630f0006f048cad66f01e8 | /focuslock/noneWidgets.py | 19b199146ee8eff83e006db7a537ec2367e25328 | [] | no_license | amancebo/acadia_new | cda1eafba65af71e0c4889f25c7dd527aa06abdc | 0c4bbd382140b92a85338b8b3335b2b660b8266b | refs/heads/master | 2021-03-30T16:49:46.252851 | 2015-06-01T23:46:22 | 2015-06-01T23:46:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,791 | py | #!/usr/bin/python
#
# Dummy classes for use when you have some but not all
# of the focus lock functionality.
#
# Hazen 12/09
#
# Fake QPD
class QPD():
    """Dummy quadrant photodiode used when the real device is absent."""
    def __init__(self):
        """No hardware to initialise."""
    def qpdScan(self):
        """Return a constant fake reading: [sum signal, x, y]."""
        return [1000.0, 0.0, 0.0]
    def shutDown(self):
        """Nothing to release."""
# Fake nano-positioner
class NanoP():
    """Dummy nano-positioner stage: accepts moves, does nothing."""
    def __init__(self):
        """No hardware to initialise."""
    def moveTo(self, axis, position):
        """Pretend to move *axis* to *position*; always succeeds silently."""
    def shutDown(self):
        """Nothing to release."""
# Fake IR laser
class IRLaser():
    """Dummy IR focus-lock laser: on/off requests are accepted and ignored."""
    def __init__(self):
        """No hardware to initialise."""
    def on(self):
        """Pretend to switch the laser on."""
    def off(self):
        """Pretend to switch the laser off."""
#
# The MIT License
#
# Copyright (c) 2009 Zhuang Lab, Harvard University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
| [
"amancebo8@gmail.com"
] | amancebo8@gmail.com |
b752d574ddacb2a426c671af560d8757f88254a7 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03434/s582963555.py | 202b3b79bca8e1fc753461f081b5a5eff838303c | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 447 | py | """Boot-camp-for-Beginners_Easy010_B_Toll-Gates_29-August-2020.py"""
import numpy as np  # NOTE(review): unused here; kept to avoid changing the module's imports
# Greedy split of the sorted-descending scores: Alice takes the elements at
# even 0-based ranks, Bob takes the odd ranks; print Alice's total minus Bob's.
N = int(input())
a = list(map(int, input().split()))
a = sorted(a, reverse=True)
Alice = [a[i] for i in range(0, len(a), 2)]
Bob = [a[i] for i in range(1, len(a), 2)]
# Fix: the manual accumulation loops were replaced with the built-in sum(),
# which is clearer and runs at C speed; commented-out debug prints removed.
s_max = sum(Alice)
s_min = sum(Bob)
print(s_max-s_min)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
b263b1252c936c5b02eccbceeade34b685d6ea60 | bc2effb57e82128b81371fb03547689255d5ef15 | /백준/다이나믹 프로그래밍/2579(계단 오르기).py | 8007584be6e6c9eadf04a66b339c98f4460e1283 | [] | no_license | CharmingCheol/python-algorithm | 393fa3a8921f76d25e0d3f02402eae529cc283ad | 61c8cddb72ab3b1fba84171e03f3a36f8c672648 | refs/heads/master | 2023-03-01T11:00:52.801945 | 2021-01-31T13:38:29 | 2021-01-31T13:38:29 | 229,561,513 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,153 | py | """
1.패턴 찾기
- 높이가 1인 경우
> nums[0]을 출력시킨다
- 높이가 2인 경우
> nums[0] + nums[1]을 출력시킨다
> 한칸씩 올라온 경우이기 때문
- 높이가 3이상인 경우
> case1 = nums[index] + nums[index - 1] + dp[index - 3]
case2 = nums[index] + dp[index - 2]
dp[index] = max(case1, case2)
"""
import sys
# Stair-climbing DP (BOJ 2579): dp[i] is the best score ending on step i when
# three consecutive steps may not all be trodden -- hence the two cases below.
size = int(sys.stdin.readline())
nums = list(int(sys.stdin.readline()) for _ in range(size))
dp = [0] * size
dp[0] = nums[0]
if size == 1:
    print(nums[0])
elif size == 2:
    print(nums[0] + nums[1])
else:
    for index in range(1, 3):
        case1 = nums[index - 1] + nums[index]  # previous step + current step
        case2 = nums[index]  # current step only
        if index == 2:
            case2 += nums[index - 2]  # plus the step before last
        dp[index] = max(case1, case2)
    for index in range(3, size):
        # current + previous step + best total three steps back
        case1 = nums[index] + nums[index - 1] + dp[index - 3]
        # current + best total two steps back
        case2 = nums[index] + dp[index - 2]
        dp[index] = max(case1, case2)
    print(dp[size - 1])
| [
"54410332+chamincheol@users.noreply.github.com"
] | 54410332+chamincheol@users.noreply.github.com |
8127e734bff3bf1170acb97c2ed2a4638f4c4742 | 78e1a576b128c25b0e85234675f820a1d2d54595 | /urls_collector/migrations/0001_initial.py | a6930c1d867a6a24ca22fefa6f704642d985ef9d | [] | no_license | Evgenus/comeet-pdf-challenge | 7e0e3af9ddfd4558c37284d14c87ff5923b53e93 | 9dc3a3b1bbab0b06ed4f1a35f86fbb2826b64858 | refs/heads/master | 2021-05-10T10:20:26.112256 | 2018-01-22T13:32:36 | 2018-01-22T13:32:36 | 118,379,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,894 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-01-20 12:14
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: Document and URL, linked many-to-many via Occurence."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Document',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('filename', models.CharField(max_length=250)),
            ],
            options={
                'ordering': ('created',),
            },
        ),
        # Through-table: one row per (document, url) occurrence.
        migrations.CreateModel(
            name='Occurence',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('document', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='urls_collector.Document')),
            ],
        ),
        migrations.CreateModel(
            name='URL',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('url', models.CharField(max_length=2000)),
                ('documents', models.ManyToManyField(through='urls_collector.Occurence', to='urls_collector.URL')),
            ],
        ),
        # The URL side of the through-table can only be added once URL exists.
        migrations.AddField(
            model_name='occurence',
            name='url',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='urls_collector.URL'),
        ),
        migrations.AddField(
            model_name='document',
            name='urls',
            field=models.ManyToManyField(through='urls_collector.Occurence', to='urls_collector.URL'),
        ),
    ]
| [
"chernyshov.eugene@gmail.com"
] | chernyshov.eugene@gmail.com |
9ea3a93d380538a95060def4a48b25f559346ddf | c8836d495a97c1c5183169d7c33c909ae579f69d | /worldcup/worldcup/matches/load_stage2_matches.py | db3ed408225dfda250c4b39e72393539ba1e7be8 | [
"MIT"
] | permissive | raprasad/worldcup | 4042f54231083d627d9cb9c9ed764eaf5914984e | e993bb3345d4be339211fac2698832886973ec7c | refs/heads/master | 2021-01-01T19:42:49.034715 | 2014-05-23T20:39:30 | 2014-05-23T20:39:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,269 | py | from datetime import datetime
from worldcup.matches.models import *
"""
# match num team 1 team 2 grid group date time
matches = '''49 1a 2b 1 6/26 16:00 Nelson Mandela Bay
50 1c 2d 1 6/26 20:30 Rustenburg
51 1d 2c 2 6/27 16:00 Mangaung / Bloemfontein
52 1b 2a 2 6/27 20:30 Johannesburg
53 1e 2f 3 6/28 16:00 Durban
54 1g 2h 3 6/28 20:30 Johannesburg
55 1f 2e 4 6/29 16:00 Tshwane/Pretoria
56 1h 2g 4 6/29 20:30 Cape Town
57 53 54 7/2 16:00 Nelson Mandela Bay
58 49 50 7/2 20:30 Johannesburg
59 52 51 7/3 16:00 Cape Town
60 55 56 7/3 20:30 Johannesburg
61 1 3 7/6 20:30 Cape Town
62 2 4 7/7 20:30 Durban
63 1,3 2,4 7/11 20:30 Nelson Mandela Bay
64 1,3 2,4 7/12 20:30 Johannesburg'''.split('\n')
from worldcup.matches.models import *
from worldcup.teams.models import get_team_not_determined
from datetime import datetime
try:
match_type = MatchType.objects.get(name="Knockout Stage")
except MatchType.DoesNotExist:
match_type =MatchType(name="Knockout Stage", last_day_to_predict=datetime(year=2010,month=6,day=25))
match_type.save()
for line in matches:
items = line.split('\t')
print items
match_num, team1_choices, team1_choices, grid_group, dt, time_str, venue_str = items
mm, dd = dt.split('/')
hh, min = time_str.split(':')
dt_obj = datetime(year=2010, month=int(mm), day=int(dd), hour=int(hh), minute=int(min))
team1 = get_team_not_determined() #Team.objects.get(name=team1)
team2 = get_team_not_determined() #Team.objects.get(name=team2)
venues = Venue.objects.filter(name__startswith=venue_str)
if venues.count() == 0:
print 'venue not found: %s' % venue_str
else:
venue=venues[0]
match_num = int(match_num)
if match_num == 64:
mname = 'Game %s: Final' % match_num
elif match_num == 63:
mname = 'Game %s: 3rd/4th Place' % match_num
elif match_num in [61, 62]:
mname = 'Game %s: Semi Finals' % match_num
elif match_num in range(57,61):
mname = 'Game %s: Quarter Finals' % match_num
else:
mname = 'Game %s: Round of 16' % match_num
new_match = Match(name=mname, team1=team1, team2=team2, match_type=match_type, match_time=dt_obj)
new_match.venue = venue
new_match.save()
"""
| [
"raman_prasad@harvard.edu"
] | raman_prasad@harvard.edu |
3beb0061ecb0b50b4fea93fde1963afe43fda6aa | 2b16a66bfc186b52ed585081ae987e97cab8223b | /test/db/test_APIAliasGenerate.py | c488b48d65177957b58186d5b9fd019dedf7f3cd | [] | no_license | OldPickles/SKnowledgeGraph | d334000c7a41dd5014fd59154bbe070fcc754e4c | 6d131ad6bf3a09a5ce6461fa03690117d703c9e8 | refs/heads/master | 2022-01-09T11:27:00.043712 | 2019-06-06T07:57:06 | 2019-06-06T07:57:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,717 | py | from unittest import TestCase
from db.alias_util import APIAliasGeneratorFactory
from db.model import APIAlias, APIEntity
class TestAPIAliasGenerate(TestCase):
    """Exercise the alias generators produced by APIAliasGeneratorFactory.

    Each test builds a generator for one alias type and checks the alias it
    produces for a fully-qualified method name (Python 2 test suite).
    """
    def test_get_method_qualifier_parameter_type(self):
        """Simple method name, parameter types kept fully qualified."""
        generator = APIAliasGeneratorFactory.create_generator(
            APIAlias.ALIAS_TYPE_SIMPLE_METHOD_WITH_QUALIFIER_PARAMETER_TYPE)
        self.one_name(generator,
                      "org.xml.sax.ext.Attributes2Impl.addAttribute(java.lang.String,java.lang.String,java.lang.String,java.lang.String,java.lang.String)",
                      "addAttribute(java.lang.String,java.lang.String,java.lang.String,java.lang.String,java.lang.String)",
                      APIAlias.ALIAS_TYPE_SIMPLE_CLASS_NAME_METHOD_WITH_QUALIFIER_PARAMETER_TYPE)
    def test_get_alias_name(self):
        """Class.method form, parameter types kept fully qualified."""
        generator = APIAliasGeneratorFactory.create_generator(
            APIAlias.ALIAS_TYPE_SIMPLE_CLASS_NAME_METHOD_WITH_QUALIFIER_PARAMETER_TYPE)
        self.one_name(generator,
                      "org.xml.sax.ext.Attributes2Impl.addAttribute(java.lang.String,java.lang.String,java.lang.String,java.lang.String,java.lang.String)",
                      "Attributes2Impl.addAttribute(java.lang.String,java.lang.String,java.lang.String,java.lang.String,java.lang.String)",
                      APIAlias.ALIAS_TYPE_SIMPLE_CLASS_NAME_METHOD_WITH_QUALIFIER_PARAMETER_TYPE)
    def test_get_simple_type_method_alias_name(self):
        """Method-only form with parameter types reduced to simple names."""
        alias_type = APIAlias.ALIAS_TYPE_SIMPLE_NAME_METHOD_WITH_SIMPLE_PARAMETER_TYPE
        generator = APIAliasGeneratorFactory.create_generator(
            alias_type)
        self.one_name(generator,
                      "org.xml.sax.ext.Attributes2Impl.addAttribute.drainTo(java.shared.Collection<? super java.shared.concurrent.PriorityBlockingQueue>)",
                      "drainTo(Collection<? super PriorityBlockingQueue>)",
                      alias_type)
        self.one_name(generator,
                      "org.xml.sax.ext.Attributes2Impl.sort(T[],java.shared.Comparator<? super T>)",
                      "sort(T[],Comparator<? super T>)",
                      alias_type)
        self.one_name(generator,
                      "org.xml.sax.ext.Attributes2Impl.compute(java.lang.Object,java.shared.function.BiFunction<? super,? super,? extends java.lang.Object>)",
                      "compute(Object,BiFunction<? super,? super,? extends Object>)",
                      alias_type)
        self.one_name(generator,
                      "org.xml.sax.ext.Attributes2Impl.getBoolean(java.lang.String)",
                      "getBoolean(String)",
                      alias_type)
        self.one_name(generator,
                      "org.xml.sax.ext.Attributes2Impl.createXMLStreamWriter(OutputStream,java.lang.String)",
                      "createXMLStreamWriter(OutputStream,String)",
                      alias_type)
    def test_get_simple_type_class_method_alias_name(self):
        """Class.method form with parameter types reduced to simple names."""
        alias_type = APIAlias.ALIAS_TYPE_SIMPLE_CLASS_NAME_METHOD_WITH_SIMPLE_PARAMETER_TYPE
        generator = APIAliasGeneratorFactory.create_generator(
            alias_type)
        self.one_name(generator,
                      "org.xml.sax.ext.Attributes2Impl.drainTo(java.shared.Collection<? super java.shared.concurrent.PriorityBlockingQueue>)",
                      "Attributes2Impl.drainTo(Collection<? super PriorityBlockingQueue>)",
                      alias_type)
        self.one_name(generator,
                      "org.xml.sax.ext.Attributes2Impl.sort(T[],java.shared.Comparator<? super T>)",
                      "Attributes2Impl.sort(T[],Comparator<? super T>)",
                      alias_type)
        self.one_name(generator,
                      "org.xml.sax.ext.Attributes2Impl.compute(java.lang.Object,java.shared.function.BiFunction<? super,? super,? extends java.lang.Object>)",
                      "Attributes2Impl.compute(Object,BiFunction<? super,? super,? extends Object>)",
                      alias_type)
        self.one_name(generator,
                      "org.xml.sax.ext.Attributes2Impl.getBoolean(java.lang.String)",
                      "Attributes2Impl.getBoolean(String)",
                      alias_type)
        self.one_name(generator,
                      "org.xml.sax.ext.Attributes2Impl.createXMLStreamWriter(OutputStream,java.lang.String)",
                      "Attributes2Impl.createXMLStreamWriter(OutputStream,String)",
                      alias_type)
    def test_get_camel_case_alias_name(self):
        """Camel-case splitting; a None expectation means no alias produced."""
        alias_type = APIAlias.ALIAS_TYPE_CAMEL_CASE_TO_SPACE
        generator = APIAliasGeneratorFactory.create_generator(
            alias_type)
        examples = [
            (
                "org.xml.sax.ext.Attributes2Impl.drainTo(java.shared.Collection<? super java.shared.concurrent.PriorityBlockingQueue>)",
                "drain To"),
            ("org.xml.sax.ext.Attributes2Impl",
             "Attributes2 Impl"),
            ("Attributes2Impl.createXMLStreamWriter(OutputStream,String)",
             "create XML Stream Writer",),
            ("sort", None),
            ("java.lang.Object", None)]
        for example in examples:
            self.one_name(generator, example[0], example[1], alias_type)
    def one_name(self, generator, qualifier_name, right_answer, right_type):
        """Assert the first alias generated for qualifier_name (or no alias
        at all when right_answer is None)."""
        aliases = generator.generate_aliases(
            APIEntity(qualified_name=qualifier_name, api_type=APIEntity.API_TYPE_METHOD))
        print aliases
        if right_answer is not None:
            self.assertEqual(aliases[0].alias,
                             right_answer)
            self.assertEqual(aliases[0].type, right_type)
        else:
            self.assertEqual(aliases, [])
| [
"467701860@qq.com"
] | 467701860@qq.com |
bec3c4222a01732f770d2e955ab9729e86491cfe | e0b6b5708aa81fcb6f9bae26be06b7f15984274c | /leetcode/most-common-word/epicarts.py | 35e87f0d0999a72290e69ce5e72d45587680d89f | [] | no_license | DongLee99/algorithm-study | aab021b71f04140bbad2842b868ef063e7bf1117 | aebe1bc4e461be47b335337c6f05d6a25e19e80a | refs/heads/master | 2023-03-18T05:39:52.708544 | 2021-03-17T14:56:05 | 2021-03-17T14:56:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 497 | py | import re
from typing import List
import collections
class Solution:
    def mostCommonWord(self, paragraph: str, banned: List[str]) -> str:
        """Return the most frequent lower-cased word of *paragraph* that is
        not listed in *banned*; any non-word character separates words."""
        normalized = re.sub('[^\w]', ' ', paragraph).lower()
        frequency = collections.Counter(
            token for token in normalized.split() if token not in banned)
        return frequency.most_common(1)[0][0]
# Ad-hoc smoke test: expected output is "ball" ("hit" is banned).
paragraph = "Bob hit a ball, the hit BALL flew far after it was hit."
banned = ["hit"]
print(Solution().mostCommonWord(paragraph, banned))
| [
"0505zxc@gmail.com"
] | 0505zxc@gmail.com |
2b585ec0903bda35bd59d65a1be1bd1fa14c9967 | a2c7bc7f0cf5c18ba84e9a605cfc722fbf169901 | /python_1001_to_2000/1167_Minimum_Cost_to_Connect_Sticks.py | 54e5622039c7c0898974b8651fd030e7ebadff08 | [] | no_license | jakehoare/leetcode | 3bf9edd499034ce32be462d4c197af9a8ed53b5d | 05e0beff0047f0ad399d0b46d625bb8d3459814e | refs/heads/master | 2022-02-07T04:03:20.659422 | 2022-01-26T22:03:00 | 2022-01-26T22:03:00 | 71,602,471 | 58 | 38 | null | null | null | null | UTF-8 | Python | false | false | 1,044 | py | _author_ = 'jake'
_project_ = 'leetcode'
# https://leetcode.com/problems/minimum-cost-to-connect-sticks/
# You have some sticks with positive integer lengths.
# You can connect any two sticks of lengths X and Y into one stick by paying a cost of X + Y.
# You perform this action until there is one stick remaining.
# Return the minimum cost of connecting all the given sticks into one stick in this way.
# Since the cost is proportional to the lengths of sticks connected, we want to connect the shortest sticks.
# Maintain a heap and repeatedly connect the shortest 2 sticks, putting the connected stick back on the heap.
# Time - O(n log n)
# Space - O(n)
import heapq
class Solution(object):
    def connectSticks(self, sticks):
        """Greedily merge the two shortest sticks until one remains and
        return the accumulated merge cost.

        :type sticks: List[int]
        :rtype: int
        """
        heapq.heapify(sticks)  # min-heap, built in place
        total = 0
        while len(sticks) > 1:
            first = heapq.heappop(sticks)
            second = heapq.heappop(sticks)
            joined = first + second
            total += joined
            heapq.heappush(sticks, joined)
        return total
| [
"jake_hoare@hotmail.com"
] | jake_hoare@hotmail.com |
219c8305644dc74a59f5c9a419b18ef748fdc326 | 8af3b7ed8c4694dd0109de50e9b235ec35838d02 | /src/purchase/utils.py | a9ae4a00cd9510a7c0bd6b8a62347aad8bb1895f | [
"MIT",
"LicenseRef-scancode-free-unknown"
] | permissive | vishalhjoshi/croma | 9b8640a9ce46320e865211c31fb3b4b503d47f6f | 5b033a1136a9a8290118801f0e7092aebd9d64cc | refs/heads/master | 2020-06-19T14:57:42.909264 | 2019-05-16T20:10:58 | 2019-05-16T20:10:58 | 196,753,381 | 1 | 0 | MIT | 2019-07-13T18:23:29 | 2019-07-13T18:23:29 | null | UTF-8 | Python | false | false | 1,979 | py |
def create_PurchaseInvDtl_JSON_QuerySet(PurInvDtl_queryset):
    """Serialise purchase-invoice detail rows into a list of JSON-ready
    dicts (one per line item) for the purchase-invoice screen.

    NOTE(review): the bare ``except:`` clauses below swallow *every* error
    (including AttributeError from typos) and substitute placeholder values;
    consider narrowing them to the exceptions actually expected.
    """
    sale_dtl_item_arr = []
    for item in PurInvDtl_queryset:
        item_set = {}
        # Batch lookup may fail/return None -- presumably why the batch
        # fields below are wrapped in try/except. TODO confirm.
        batch_instance = item.get_batch_instance()
        item_set['item_name'] = str(item.get_item_name())
        item_set['item_id'] = str(item.get_item_id())
        item_set['batch_no'] = str(item.batch_no)
        try:
            # Expiry is exposed as "YYYY-MM"; "-" marks a missing expiry.
            item_set['expiry'] = str(batch_instance.expiry.strftime("%Y-%m"))
        except:
            item_set['expiry'] = "-"
        item_set['strip_qty'] = int(item.strip_qty)
        item_set['nos_qty'] = int(item.nos_qty)
        item_set['strip_free'] = int(item.strip_free)
        item_set['nos_free'] = int(item.nos_free)
        item_set['rate'] = float(item.rate)
        try:
            item_set['mrp'] = float(batch_instance.mrp)
        except:
            item_set['mrp'] = 0.00
        try:
            item_set['pur_rate'] = float(batch_instance.strip_pur)
        except:
            item_set['pur_rate'] = 0.00
        item_set['amount'] = float(item.amount)
        item_set['discount'] = float(item.discount)
        item_set['disc_amt'] = float(item.disc_amt)
        item_set['disc_type'] = str(item.disc_type)
        item_set['excise'] = float(item.excise)
        item_set['excise_type'] = str(item.excise_type)
        item_set['other_charge'] = float(item.other_charge)
        item_set['conv'] = float(item.get_unit_conv())
        item_set['sgst_amt'] = float(item.sgst_amt)
        item_set['cgst_amt'] = float(item.cgst_amt)
        # get_item_gst() returns a pair; index 0 is used as CGST and index 1
        # as SGST below -- verify the ordering against the model helper.
        gst = item.get_item_gst()
        item_set['sgst'] = float(gst[1])
        item_set['cgst'] = float(gst[0])
        try:
            # Batch-level rates/stock; all-or-nothing fallback to zeros.
            item_set['trade_rate'] = float(batch_instance.trade_rt)
            item_set['std_rate'] = float(batch_instance.std_rt)
            item_set['inst_rate'] = float(batch_instance.inst_rt)
            item_set['strip_stock'] = int(batch_instance.strip)
            item_set['nos_stock'] = int(batch_instance.nos)
        except:
            item_set['trade_rate'] = 0.00
            item_set['std_rate'] = 0.00
            item_set['inst_rate'] = 0.00
            item_set['strip_stock'] = 0
            item_set['nos_stock'] = 0
        item_set['deleted'] = 0
        sale_dtl_item_arr.append(item_set)
    return sale_dtl_item_arr
| [
"jaisinghal48@gmail.com"
] | jaisinghal48@gmail.com |
dd4bd950a39ea20b33814bbe7e882ba1ea2b1bc3 | f4e8c8294b23fe070a763b527bc2f75ccdebaab1 | /leetCodePython/83.remove-duplicates-from-sorted-list.py | f829f2439febd64241b2d38c02332bae0a292c3b | [] | no_license | HOZH/leetCode | aff083b33e9b908490e7e992a0ad192ee31cc2e5 | a0ab59ba0a1a11a06b7086aa8f791293ec9c7139 | refs/heads/master | 2023-08-17T08:44:07.884861 | 2023-08-08T19:47:42 | 2023-08-08T19:47:42 | 136,558,812 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 733 | py | #
# @lc app=leetcode id=83 lang=python3
#
# [83] Remove Duplicates from Sorted List
#
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def deleteDuplicates(self, head):
        """Remove consecutive duplicate values from a sorted linked list,
        keeping the first node of each run; returns the original head."""
        node = head
        while node is not None and node.next is not None:
            if node.next.val == node.val:
                # Splice out the duplicate neighbour.
                node.next = node.next.next
            else:
                node = node.next
        return head
| [
"hong.zheng@stonybrook.edu"
] | hong.zheng@stonybrook.edu |
7453eac3ad4a2c9b553a59c2694a0a7c020a5d8a | 0343de40021f8dd72fb9a6cb31b5d2f24ccd7971 | /utilities/wake_models_mean/util.py | 17331324b776f80e56a804d954b718b2ef2a65bd | [] | no_license | sebasanper/WINDOW_dev | 47ae9252e6fadb2a3b1a0aae3383681a7955f4ea | 3c6437a777f2fc3be1dfd3d53b5d2ed25281c55c | refs/heads/master | 2021-01-01T19:45:02.555727 | 2018-05-21T20:27:56 | 2018-05-21T20:27:56 | 98,670,270 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | def interpolate(minx, miny, maxx, maxy, valx):
    # Linearly map valx from the input range [minx, maxx] onto the output
    # range [miny, maxy]; divides by (maxx - minx), so maxx must differ
    # from minx.
    # print maxx, minx
    return miny + (maxy - miny) * ((valx - minx) / (maxx - minx))
| [
"s.sanchezperezmoreno@tudelft.nl"
] | s.sanchezperezmoreno@tudelft.nl |
f98901e3b2917cb13dcbd40a6f3479c0b87c6840 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03131/s120646921.py | 522b4214f36258ffb62506f2aa2bb22339794d1f | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | K,A,B= map(int,input().split())
# Greedy count: when B - A <= 2 exchanging is never worthwhile, so every
# press is a plain +1; otherwise build up A - 1 first, then trade each
# remaining pair of presses for B - A.
ans = 1
if (B-A) <= 2:
    ans += K
else:
    # Bug fix: the original re-tested `B - A <= 2` inside this else branch,
    # which is always false here; the dead duplicate branch was removed.
    ans += A - 1
    remaining = K - (A - 1)
    ans += (remaining // 2) * (B - A)
    if remaining % 2 != 0:
        # One leftover press gives a final +1.
        ans += 1
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
ce5e7eeed67ed5e94efb5bf6fec120007d0c6a1f | b35f86d9746f782dca05765a3c2d31e1e7a511b8 | /src/tubesite/models.py | df26a20d6bdd7017711e39e94258e224d255cf08 | [] | no_license | jccode/cutetube | 5f28f5c242c5213c91590ef82f97934257cb2b42 | 0a91745ca9adccab68ffb3d742d8fcc5c4b52f2b | refs/heads/master | 2020-12-13T21:47:30.129097 | 2017-07-31T01:36:53 | 2017-07-31T01:36:53 | 95,474,863 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,044 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.utils.safestring import mark_safe
# from django.contrib.postgres.fields import JSONField
# from jsonfield import JSONField
from .utils import JSONField
from filer.fields.image import FilerImageField
# Create your models here.
class Category(models.Model):
    """A named category with a display image managed by django-filer."""
    name = models.CharField(max_length=50)
    #image = models.ImageField(upload_to="category")
    image = FilerImageField()
    def image_thumbnail(self):
        # HTML <img> snippet for the admin list display, using django-filer's
        # pre-generated 'admin_tiny_icon' thumbnail; mark_safe trusts the
        # filer-provided URL and label.
        if self.image:
            s = '<img src="{src}" alt="{name}"/>'.format(src=self.image.thumbnails['admin_tiny_icon'], name=self.image.label)
            return mark_safe(s)
        else:
            return '(No image)'
    image_thumbnail.short_description = 'Thumb'
    def __unicode__(self):
        return self.name
class Video(models.Model):
    """A playable video item, optionally grouped under a Category."""
    QUALITY_CHOICES = (
        (0, 'Normal'),
        (1, 'HD'),
    )
    category = models.ForeignKey('Category', blank=True, null=True)
    name = models.CharField(max_length=100, blank=True)
    desc = models.TextField(max_length=200, blank=True)
    poster = models.URLField()  # preview image URL
    src = models.URLField()  # primary media URL
    duration = models.IntegerField()  # NOTE(review): presumably seconds -- confirm
    quality = models.IntegerField(choices=QUALITY_CHOICES, default=0)
    # Presumably flags multi-source entries stored in `extra` -- confirm.
    multiple = models.BooleanField(default=False)
    extra = JSONField(blank=True, null=True)
    """
    extra json format:
    { videos: [{src:"", poster:"", duration:"" }, ], }
    """
    def poster_thumbnail(self):
        # Small <img> snippet for the admin changelist.
        if self.poster:
            poster_src = self.poster
            s = '<img src="{src}" alt="{name}" width="32" height="32"/>'.format(src=poster_src, name=self.name)
            return mark_safe(s)
        else:
            return '(No poster)'
    poster_thumbnail.short_description = 'Thumb'
    def quality_str(self):
        # Human-readable label for the stored quality code ('Normal' / 'HD').
        choices = {c[0]: c[1] for c in self.QUALITY_CHOICES}
        return choices[self.quality]
    def __unicode__(self):
        # NOTE(review): name is blank=True, so an empty string (not None) is
        # the usual "unset" value; this check still returns the empty name then.
        return self.name if self.name is not None else self.poster
| [
"junchangchen@gmail.com"
] | junchangchen@gmail.com |
6f96e83991bae143e3344852c09a992a151e54a7 | fe3759747f709a41e5ff3acf78872dd6b74f772a | /samples/openapi3/client/petstore/python-experimental/petstore_api/model/dog_all_of.py | 82cee4c2f86b5e935a3f34a13f73fa89ec48000a | [
"Apache-2.0"
] | permissive | Januson/openapi-generator | c50e3b52765e41adba9712d745918cea39dfa490 | 5b6b4c9d4829b57716741dc35b3f1033e5483784 | refs/heads/master | 2022-10-19T04:16:38.042495 | 2022-04-23T08:42:21 | 2022-04-23T08:42:21 | 238,659,737 | 0 | 0 | Apache-2.0 | 2023-09-05T01:01:23 | 2020-02-06T10:12:38 | Java | UTF-8 | Python | false | false | 1,945 | py | # coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
import typing # noqa: F401
from frozendict import frozendict # noqa: F401
import decimal # noqa: F401
from datetime import date, datetime # noqa: F401
from frozendict import frozendict # noqa: F401
from petstore_api.schemas import ( # noqa: F401
AnyTypeSchema,
ComposedSchema,
DictSchema,
ListSchema,
StrSchema,
IntSchema,
Int32Schema,
Int64Schema,
Float32Schema,
Float64Schema,
NumberSchema,
UUIDSchema,
DateSchema,
DateTimeSchema,
DecimalSchema,
BoolSchema,
BinarySchema,
NoneSchema,
none_type,
Configuration,
Unset,
unset,
ComposedBase,
ListBase,
DictBase,
NoneBase,
StrBase,
IntBase,
Int32Base,
Int64Base,
Float32Base,
Float64Base,
NumberBase,
UUIDBase,
DateBase,
DateTimeBase,
BoolBase,
BinaryBase,
Schema,
_SchemaValidator,
_SchemaTypeChecker,
_SchemaEnumMaker
)
class DogAllOf(
    DictSchema
):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    # Optional 'breed' string property declared by the Dog allOf schema.
    breed = StrSchema
    def __new__(
        cls,
        *args: typing.Union[dict, frozendict, ],
        breed: typing.Union[breed, Unset] = unset,
        _configuration: typing.Optional[Configuration] = None,
        **kwargs: typing.Type[Schema],
    ) -> 'DogAllOf':
        # Forwards everything to DictSchema.__new__; this override only
        # surfaces 'breed' as a named keyword for tooling/editors.
        return super().__new__(
            cls,
            *args,
            breed=breed,
            _configuration=_configuration,
            **kwargs,
        )
| [
"noreply@github.com"
] | Januson.noreply@github.com |
3492b0d6ce2c684a730e99b0b923f6eb900e6355 | b30216e0e7d5181da3430087c1deb121f0c58dd5 | /Accounts/migrations/0003_usernames_image.py | c4ccec940e692346590414372a99b33347dc62fd | [
"MIT"
] | permissive | kurianbenoy/ShareGoods | ac360f7523636f3d0f5c758aac19ccd7de6769d8 | 5231a0ec2ad3ef22fa5da1c2ab2ba5a654ba75e0 | refs/heads/master | 2020-03-24T12:08:16.263168 | 2018-08-10T17:48:24 | 2018-08-10T17:48:24 | 142,704,517 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 483 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-07-29 03:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an optional profile image upload field to the Usernames model."""
    dependencies = [
        ('Accounts', '0002_auto_20180729_0818'),
    ]
    operations = [
        migrations.AddField(
            model_name='usernames',
            name='image',
            # Optional; uploads land under MEDIA_ROOT/uploads/.
            field=models.ImageField(blank=True, null=True, upload_to='uploads/'),
        ),
    ]
| [
"kurian.pro@gmail.com"
] | kurian.pro@gmail.com |
0ecfe9bc8614ef986ade28e1ce80d6929d627709 | 187ba0b860c1311e1e931825ad207b57d673d46d | /docs/conf.py | e64e7747d4c0c6f6ba851b43f3e40d6cbf5cbc0e | [
"MIT"
] | permissive | acgh213/jaraco.nxt | 698a230f7fd80e046741300182f283065900a3cd | 954f7dc95ac7c82e06b8d21a26b5d654ced31c9f | refs/heads/master | 2021-05-30T02:10:10.287918 | 2015-11-30T01:14:47 | 2015-11-30T01:14:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import setuptools_scm
# Sphinx configuration for the jaraco.nxt documentation build.
extensions = [
    'sphinx.ext.autodoc',
]
# General information about the project.
project = 'jaraco.nxt'
copyright = '2015 Jason R. Coombs'
# The short X.Y version, derived from the SCM metadata of the repo root.
version = setuptools_scm.get_version(root='..', relative_to=__file__)
# The full version, including alpha/beta/rc tags.
release = version
master_doc = 'index'
| [
"jaraco@jaraco.com"
] | jaraco@jaraco.com |
407869ec28a72c22c3176031a91d984739fd1356 | a0757f4148f4bcc0ae2a499267c884e2c04c389d | /posts/migrations/0001_initial.py | 9da7662c853bb3787e6740e2f5c0c1441eb711f9 | [] | no_license | hkailee/sequencinggo | 7a1a11889e46b4b4aba0aeda6e50ff5d74c8f65b | e118f5815ac6ad41053a7edcaacb00f4a414f2a2 | refs/heads/master | 2021-01-21T03:21:46.672069 | 2016-12-03T05:58:33 | 2016-12-03T05:58:33 | 69,310,426 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,570 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import taggit.managers
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema for the posts app: ``Post`` and ``Comment`` models.

    The Comment->Post / Comment->User links are added as separate AddField
    operations after both models exist.
    """
    dependencies = [
        # the configured user model and the taggit app must exist first
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('taggit', '0002_auto_20150616_2121'),
    ]
    operations = [
        # Comment: body text with created/updated timestamps and an active flag.
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('body', models.TextField()),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('active', models.BooleanField(default=True)),
            ],
            options={
                # default queryset ordering: oldest comment first
                'ordering': ('created',),
            },
        ),
        # Post: titled, sluggable, taggable content with likes.
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(max_length=200)),
                ('slug', models.SlugField(max_length=200, unique_for_date='created')),
                ('image', models.ImageField(blank=True, null=True, upload_to='images/%Y/%m/%d')),
                ('description', models.TextField(blank=True)),
                ('created', models.DateTimeField(default=django.utils.timezone.now, db_index=True)),
                ('tags', taggit.managers.TaggableManager(verbose_name='Tags', to='taggit.Tag', through='taggit.TaggedItem', help_text='A comma-separated list of tags.')),
                ('user', models.ForeignKey(related_name='posts_created', to=settings.AUTH_USER_MODEL)),
                ('users_like', models.ManyToManyField(blank=True, to=settings.AUTH_USER_MODEL, related_name='posts_liked')),
            ],
        ),
        # Wire comments to their post, author, and likers.
        migrations.AddField(
            model_name='comment',
            name='post',
            field=models.ForeignKey(related_name='comments', to='posts.Post'),
        ),
        migrations.AddField(
            model_name='comment',
            name='user',
            field=models.ForeignKey(related_name='comments_created', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='comment',
            name='users_like',
            field=models.ManyToManyField(blank=True, to=settings.AUTH_USER_MODEL, related_name='comments_liked'),
        ),
    ]
| [
"leehongkai@gmail.com"
] | leehongkai@gmail.com |
b886898739062d97e735446d5063b42c56bd703c | b385f39c5b701fb6f22796ab951872257ae8398a | /exercicios-secao04/exercicio10.py | 366c4faf0f212965a046781b9c4425271372735f | [
"MIT"
] | permissive | EhODavi/curso-python | 5c97a6913bad198ae590519287ed441c95399d80 | cf07e308be9d7516f2cfe7f21c539d214c836979 | refs/heads/main | 2023-08-07T13:44:46.608118 | 2021-06-14T21:40:50 | 2021-06-14T21:40:50 | 356,542,988 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | velocidade_km_h = float(input('Informe a velocidade em km/h: '))
velocidade_m_s = velocidade_km_h / 3.6
print(f'{velocidade_km_h} km/h corresponde à {velocidade_m_s} m/s')
| [
"davi.miau@gmail.com"
] | davi.miau@gmail.com |
c8e3877940df449f6b89f121031545c98b5c2f26 | 4a36b5979b0753b32cff3956fd97fb8ed8b11e84 | /0.22/_downloads/6448e1cdd25e7f92168993a74b81b311/plot_psf_ctf_vertices.py | 26db8d8e8b32a53e728539452c6217f69cca5f9b | [] | permissive | mne-tools/mne-tools.github.io | 8aac7ae10bf2faeeb875b9a351a5530dc0e53154 | 495e878adc1ef3374e3db88604504d7542b01194 | refs/heads/main | 2023-09-03T07:06:00.660557 | 2023-09-03T04:10:18 | 2023-09-03T04:10:18 | 35,639,371 | 12 | 16 | BSD-3-Clause | 2023-05-05T19:04:32 | 2015-05-14T22:04:23 | HTML | UTF-8 | Python | false | false | 3,440 | py | # -*- coding: utf-8 -*-
"""
Plot point-spread functions (PSFs) and cross-talk functions (CTFs)
==================================================================
Visualise PSF and CTF at one vertex for sLORETA.
"""
# Authors: Olaf Hauk <olaf.hauk@mrc-cbu.cam.ac.uk>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD (3-clause)
import mne
from mne.datasets import sample
from mne.minimum_norm import (make_inverse_resolution_matrix, get_cross_talk,
get_point_spread)
print(__doc__)
data_path = sample.data_path()
subjects_dir = data_path + '/subjects/'
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
fname_cov = data_path + '/MEG/sample/sample_audvis-cov.fif'
fname_evo = data_path + '/MEG/sample/sample_audvis-ave.fif'
# read forward solution
forward = mne.read_forward_solution(fname_fwd)
# forward operator with fixed source orientations
mne.convert_forward_solution(forward, surf_ori=True,
force_fixed=True, copy=False)
# noise covariance matrix
noise_cov = mne.read_cov(fname_cov)
# evoked data for info
evoked = mne.read_evokeds(fname_evo, 0)
# make inverse operator from forward solution
# free source orientation
inverse_operator = mne.minimum_norm.make_inverse_operator(
info=evoked.info, forward=forward, noise_cov=noise_cov, loose=0.,
depth=None)
# regularisation parameter
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = 'MNE' # can be 'MNE' or 'sLORETA'
# compute resolution matrix for sLORETA
rm_lor = make_inverse_resolution_matrix(forward, inverse_operator,
method='sLORETA', lambda2=lambda2)
# get PSF and CTF for sLORETA at one vertex
sources = [1000]
stc_psf = get_point_spread(rm_lor, forward['src'], sources, norm=True)
stc_ctf = get_cross_talk(rm_lor, forward['src'], sources, norm=True)
##############################################################################
# Visualize
# ---------
# PSF:
# Which vertex corresponds to selected source
vertno_lh = forward['src'][0]['vertno']
verttrue = [vertno_lh[sources[0]]] # just one vertex
# find vertices with maxima in PSF and CTF
vert_max_psf = vertno_lh[stc_psf.data.argmax()]
vert_max_ctf = vertno_lh[stc_ctf.data.argmax()]
brain_psf = stc_psf.plot('sample', 'inflated', 'lh', subjects_dir=subjects_dir)
brain_psf.show_view('ventral')
brain_psf.add_text(0.1, 0.9, 'sLORETA PSF', 'title', font_size=16)
# True source location for PSF
brain_psf.add_foci(verttrue, coords_as_verts=True, scale_factor=1., hemi='lh',
color='green')
# Maximum of PSF
brain_psf.add_foci(vert_max_psf, coords_as_verts=True, scale_factor=1.,
hemi='lh', color='black')
###############################################################################
# CTF:
brain_ctf = stc_ctf.plot('sample', 'inflated', 'lh', subjects_dir=subjects_dir)
brain_ctf.add_text(0.1, 0.9, 'sLORETA CTF', 'title', font_size=16)
brain_ctf.show_view('ventral')
brain_ctf.add_foci(verttrue, coords_as_verts=True, scale_factor=1., hemi='lh',
color='green')
# Maximum of CTF
brain_ctf.add_foci(vert_max_ctf, coords_as_verts=True, scale_factor=1.,
hemi='lh', color='black')
###############################################################################
# The green spheres indicate the true source location, and the black
# spheres the maximum of the distribution.
| [
"larson.eric.d@gmail.com"
] | larson.eric.d@gmail.com |
a0be7c11d79e85d00ece19127980488c6049cee5 | 2933b8f123981f88e576e7ba5baa5143c3667019 | /_testing/utils/subscription.py | cce06a6f7d97a2c2279aed54db1c9095aa986f69 | [
"MIT"
] | permissive | MatthiasLienhard/pysmartnode | c6a9b4819cddcf1b928eecc385cef087a832e5cb | f853efb3378d881dd2e62eec2ca1621898953c19 | refs/heads/master | 2020-09-04T09:31:45.302398 | 2019-07-07T06:14:55 | 2019-07-07T06:14:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,829 | py | '''
Created on 18.02.2018
@author: Kevin K�ck
'''
__version__ = "0.2"
__updated__ = "2018-03-09"
import gc
from pysmartnode.utils.wrappers.timeit import timeit
memory = gc.mem_free()
gc.collect()
def printMemory(info=""):
global memory
memory_new = gc.mem_free()
print("[RAM] [{!s}] {!s}".format(info, memory_new - memory))
memory = memory_new
def creating():
gc.collect()
printMemory("Start")
from pysmartnode.utils.subscriptionHandlers.subscription import SubscriptionHandler
gc.collect()
printMemory("After import")
global handler
handler = SubscriptionHandler()
gc.collect()
printMemory("After handler creation")
@timeit
def addObjects():
for j in range(0, 3):
for i in range(0, 10):
handler.addObject("home/235j094s4eg/device{!s}/htu{!s}".format(j, i), "func{!s}".format(i))
@timeit
def getObject():
return handler.getFunctions("home/235j094s4eg/device2/htu9")
@timeit
def getObjectDirectly():
return handler.get("home/235j094s4eg/device2/htu9", 1)
@timeit
def addObjectsList():
for j in range(0, 3):
for i in range(0, 10):
a.append(("home/235j094s4eg/device{!s}/htu{!s}".format(j, i), "func{!s}".format(i)))
@timeit
def getObjectList():
for i in a:
if i[0] == "home/235j094s4eg/device3/htu9":
return i[1]
def speedtest():
creating()
gc.collect()
printMemory("after creation with no Objects")
addObjects()
gc.collect()
printMemory("30 Objects")
print(getObject())
gc.collect()
print(getObjectDirectly())
gc.collect()
printMemory("Subscription test done")
print("Comparison to list")
global a
a = []
gc.collect()
printMemory("List created")
addObjectsList()
gc.collect()
printMemory("Added 30 objects to list")
print(getObjectList())
gc.collect()
printMemory("List comparison done")
speedtest()
print("Functional test")
def test():
from pysmartnode.utils.subscriptionHandlers.subscription import SubscriptionHandler
handler = SubscriptionHandler()
handler.addObject("home/test/htu", "func1")
handler.addObject("home/test2/htu", "func2")
handler.addObject("home/test3/htu2", "func3")
print(handler.getFunctions("home/test/htu"))
print(handler.getFunctions("home/test2/htu"))
print(handler.getFunctions("home/test3/htu2"))
handler.setFunctions("home/test3/htu2", "func_test")
print(handler.getFunctions("home/test3/htu2"))
try:
print(handler.getFunctions("home/test5/htu2"))
except Exception as e:
print(e)
handler.removeObject("home/test2/htu")
try:
print(handler.getFunctions("home/test2/htu"))
except Exception as e:
print(e)
print(handler.getFunctions("home/test3/htu2"))
handler.addObject("home/1325/ds18", "funcDS")
print("Multiple subscriptions test")
handler.addObject("home/1325/ds18", "funcDS2")
print("ds18", handler.get("home/1325/ds18", 1))
return handler
test()
print("Test finished")
"""
>>> from _testing.utils import subscription
[RAM] [Start] -336
[RAM] [After import] -992
[RAM] [After handler creation] -32
[RAM] [after creation with no Objects] 0
[Time] Function addObjects: 612.455ms
[RAM] [30 Objects] -3552
[Time] Function getObject: 5.920ms
func9
[Time] Function getObjectDirectly: 5.813ms
func9
[RAM] [Subscription test done] 0
Comparison to list
[RAM] [List created] -32
[Time] Function addObjectsList: 496.705ms
[RAM] [Added 30 objects to list] -2704
[Time] Function getObjectList: 2.223ms
None
[RAM] [List comparison done] 0
Functional test
func1
func2
func3
func_test
Object home/test5/htu2 does not exist
Object home/test2/htu does not exist
func_test
Multiple subscriptions test
ds18 ['funcDS', 'funcDS2']
Test finished
"""
| [
"kevinkk525@users.noreply.github.com"
] | kevinkk525@users.noreply.github.com |
f72325a934916e12a434f42884e773a3cba383ff | 18e10db2ac29420dadf40fc1185091a1e827d6b8 | /tools/auto_freeze.py | 7585016ac6fde70896d9e67ce36bcbfb70d0f881 | [
"Apache-2.0"
] | permissive | liuxiaoliu321/faceboxes-tensorflow | a853fb86b8c1ee895028e4ce35f141f8f3ff158c | 39d12704cf9c1da324bb439b6e8a72c8b4d01d34 | refs/heads/master | 2020-08-08T08:05:57.379527 | 2019-09-25T04:06:25 | 2019-09-25T04:06:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 688 | py |
import os
import tensorflow as tf
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
model_folder = './model'
checkpoint = tf.train.get_checkpoint_state(model_folder)
##input_checkpoint
input_checkpoint = checkpoint.model_checkpoint_path
##input_graph
input_meta_graph = input_checkpoint + '.meta'
##output_node_names
output_node_names='tower_0/images,tower_0/boxes,tower_0/scores,tower_0/num_detections,training_flag'
#output_graph
output_graph='./model/detector.pb'
print('excuted')
command="python tools/freeze.py --input_checkpoint %s --input_meta_graph %s --output_node_names %s --output_graph %s"\
%(input_checkpoint,input_meta_graph,output_node_names,output_graph)
os.system(command) | [
"2120140200@mail.nankai.edu.cn"
] | 2120140200@mail.nankai.edu.cn |
6164a6e1eeaf57eb5e6c08c6f244aee8f956ada5 | 36943501114b8c6bfd59d248507140956a0d7c29 | /PSCC20/diagram2.py | 08914fee7ab952f524ee6b2c67f9f772837cc2c6 | [] | no_license | Constancellc/Demand-Model | 9c45ea1a9d9779c9da093f221a5851b168a45eac | e261371b7aa9aea113617a7cbe8176a5c5d9591c | refs/heads/master | 2021-01-13T10:32:40.243847 | 2020-08-21T09:49:14 | 2020-08-21T09:49:14 | 76,476,909 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,767 | py | import matplotlib.pyplot as plt
import numpy as np
import random
from scenarios import generate
from cvxopt import matrix, spdiag, sparse, solvers, spmatrix
scen = generate(5,10)
def optimise(scen,en):
Q = spdiag([1.0]*48)
p = [0.0]*48
for i in range(len(scen)):
for t in range(48):
p[t] += scen[i][0]*scen[i][t+1]
p = matrix(p)
A = matrix(1.0/2,(1,48))
b = matrix([float(en)])
G = spdiag([-1.0]*48)
h = matrix([0.0]*48)
sol=solvers.qp(Q,p,G,h,A,b)
x = sol['x']
return x
def get_range(profiles,offset=0,new=[0]*48):
_p1 = []
_p2 = []
_p3 = []
_p4 = []
_p5 = []
for t in range(48):
y = []
for i in range(len(profiles)):
y.append(profiles[i][t+offset])
y = sorted(y)
_p1.append(y[int(len(y)*0.1)]+new[t])
_p2.append(y[int(len(y)*0.3)]+new[t])
_p3.append(y[int(len(y)*0.5)]+new[t])
_p4.append(y[int(len(y)*0.7)]+new[t])
_p5.append(y[int(len(y)*0.9)]+new[t])
return _p1,_p2,_p3,_p4,_p5
x = optimise(scen,100000)
plt.figure()
plt.figure(figsize=(6,3))
plt.rcParams["font.family"] = 'serif'
plt.rcParams["font.size"] = '10'
plt.subplot(1,2,1)
_p1,_p2,_p3,_p4,_p5 = get_range(scen,offset=1)
plt.fill_between(range(48),_p1,_p5,color='#bfddff')
plt.fill_between(range(48),_p2,_p4,color='#80bbff')
plt.yticks([10000,20000,30000,40000,50000],['10','20','30','40','50'])
plt.xticks([7.5,23.5,39.5],['04:00','12:00','20:00'])
plt.plot(_p3,c='#0077ff')
plt.grid(ls=':')
plt.ylabel('Power (GW)')
plt.title('Before')
plt.xlim(0,47)
plt.ylim(20000,50000)
plt.subplot(1,2,2)
_p1,_p2,_p3,_p4,_p5 = get_range(scen,offset=1,new=x)
plt.fill_between(range(48),_p1,_p5,color='#bfddff')
plt.fill_between(range(48),_p2,_p4,color='#80bbff')
plt.yticks([20000,30000,40000,50000],['20','30','40','50'])
plt.xticks([7.5,23.5,39.5],['04:00','12:00','20:00'])
plt.plot(_p3,c='#0077ff')
plt.grid(ls=':')
plt.title('After')
plt.xlim(0,47)
plt.ylim(20000,50000)
plt.tight_layout()
plt.savefig('../../Dropbox/papers/PSCC-20/img/optimise.eps', format='eps', dpi=300,
bbox_inches='tight', pad_inches=0)
plt.figure()
plt.figure(figsize=(5,3))
plt.rcParams["font.family"] = 'serif'
plt.rcParams["font.size"] = '10'
plt.plot(scen[0][1:],c='k',ls=':',label='Existing Demand')
x = optimise([[1.0]+scen[15][1:]],10000)
p = []
for t in range(48):
p.append(x[t]+scen[15][t+1])
plt.plot(p,c='b',label='With Smart Charging')
plt.grid(ls=':')
plt.legend()
plt.xlim(0,47)
plt.ylim(20000,50000)
plt.yticks([20000,30000,40000,50000],['20','30','40','50'])
plt.xticks([7.5,23.5,39.5],['04:00','12:00','20:00'])
plt.ylabel('Power (GW)')
plt.tight_layout()
plt.show()
#def optimise(scenarios,en):
| [
"constancellc@gmail.com"
] | constancellc@gmail.com |
4bb38afed0529d45e57f2e7e834157f55f5bfa9c | c660ae5554a682790061c4ed62a4bc19b4890250 | /PruDigital/Django_project/Testform/student_formapp/views.py | 580578a31939097c7bf71f85a499323d13acb9f1 | [] | no_license | saurabhshukla01/Best-wsbsite | 4a63d8e21307cf7f93e240e7ef871477a23cd912 | 04f5baba1807f71309bebfd2b6675ff408b7e25e | refs/heads/master | 2022-05-30T03:32:03.609791 | 2020-04-21T09:06:23 | 2020-04-21T09:06:23 | 257,498,568 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 804 | py | from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
'''
def register(request):
return HttpResponse('index form')
'''
def register_student(request):
    # GET handler: serve the empty student registration form template.
    return render(request,'register_student.html')
def show_student(request):
    """Read the submitted registration fields from the POST body and
    re-render them on the confirmation page.

    NOTE(review): the uploaded image is read with ``request.POST.get('file')``;
    Django delivers real file uploads via ``request.FILES`` -- confirm against
    the form template before relying on ``image_path``.
    """
    # Build the template context directly (the previous empty-dict init and
    # intermediate locals were redundant).
    stu_dict = {
        'first_name': request.POST.get('fname'),
        'last_name': request.POST.get('lname'),
        'email': request.POST.get('email'),
        'image_path': request.POST.get('file'),
        'username': request.POST.get('username'),
    }
    # The password fields ('pwd'/'cpwd') were previously read but never used
    # or stored anywhere, so they are intentionally not fetched here.
    return render(request, 'show_student.html', stu_dict)
"ss7838094755@gmail.com"
] | ss7838094755@gmail.com |
f72a1cbc16ee408ae31c70d2d7b8fbbc2a467f92 | d5125ccc1ef9915ffd72c575225a620aac5cb347 | /TriAquae/TriAquae/sbin/tri_service.py | 6a274b9873ac6a2e90ebaec04a55f064edb39479 | [] | no_license | yurui829/stefanbo | 2231074e0e4f04438aff647563299ad1947bd760 | 449f862c81a3b4ae3e079ecb4a15b3a5cbcca701 | refs/heads/master | 2021-01-24T23:42:52.064783 | 2014-07-02T03:05:04 | 2014-07-02T03:05:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,813 | py | #!/usr/bin/env python
import os,sys,time
import logger,tri_config,tri_module
import django
triaquae_path = tri_config.Working_dir
working_dir = logger.cur_dir
status_check_script = 'host_status_check.py'
snmp_monitor_script = 'multiprocessing_snmpMonitor.py'
service_log = '%s/tri_service.log' % tri_config.Log_dir
snmp_monitor_log = '%s/tri_snmp_service.log' % tri_config.Log_dir
def status_monitor(interval):
    # Launch the host-status checker as a detached background process
    # (nohup ... &), unless service_status() reports it already running.
    # *interval*: polling period passed to the checker via its -s option.
    script = '%s/%s' %(working_dir,status_check_script)
    print "Checking service status....."
    if service_status(status_check_script) == 'Running':
        print "\033[33;1mHost Status Monitor service is already running!\033[0m"
    else:
        print "Starting HOST Status Monitor Service...."
        # stdout+stderr appended to the shared service log
        cmd = 'nohup python %s -s %s >> %s 2>&1 &' % (script,interval,service_log)
        result = os.system(cmd)
        if result == 0:
            print '\033[32;1mHost status monitor service started successfully!\n\033[0m'
def snmp_monitor():
    # Launch the SNMP monitor script as a detached background process,
    # unless service_status() reports it already running.
    script = '%s/%s' %(working_dir,snmp_monitor_script)
    print "Checking snmp status....."
    if service_status(snmp_monitor_script) == 'Running':
        print "\033[33;1mTriAquae SNMP monitor service is already running!\033[0m\n"
    else:
        print "Starting TriAquae SNMP monitor service...."
        # stdout+stderr appended to the dedicated SNMP service log
        cmd = 'nohup python %s >> %s 2>&1 &' % (script, snmp_monitor_log)
        result = os.system(cmd)
        if result == 0:
            print '\033[32;1mTriAquae SNMP monitor service started successfully!\n\033[0m'
def shellinabox():
    # Start the bundled shellinaboxd web terminal in the background unless
    # it is already running (-t/-b flags: see the shellinaboxd man page).
    if service_status('shellinaboxd') == 'Running':
        print "\033[33;1mshellinaboxd service is already running!\033[0m"
    else:
        cmd = '%s/shellinaboxd/bin/shellinaboxd -t -b' % triaquae_path
        if os.system(cmd) == 0:
            print '\033[32;1mshellinaboxd start success!\n\033[0m'
        else:
            print '\033[31;1mshellinaboxd start failed!\n\033[0m'
def wsgiserver():
    # Start the Django WSGI server (all interfaces, port 7000, serving
    # collected static files) in the background unless already running.
    if service_status('runwsgiserver') == 'Running':
        print "\033[33;1mrunwsgiserver service is already running!\033[0m"
    else:
        # output appended to the shared service log
        cmd = 'nohup python %s/manage.py runwsgiserver host=0.0.0.0 port=7000 staticserve=collectstatic >>%s 2>&1 &' % (triaquae_path,service_log)
        if os.system(cmd) == 0:
            print '\033[32;1mwsgi server start success!\n\033[0m'
        else:
            print '\033[31;1mwsgi server start failed!\n\033[0m'
def stop_service(service_name):
    # kill -9 every process whose command line matches *service_name*
    # (via ps|grep|awk|xargs); prints a message and does nothing when
    # service_status() says it is not running.
    cmd = "ps -ef| grep %s|grep -v grep |awk '{print $2}'|xargs kill -9" %(service_name)
    if service_status(service_name) == 'Running':
        cmd_result = os.system(cmd)
        if cmd_result == 0:
            print '..............\n'
            # brief pause so the "stopped" message follows the kill output
            time.sleep(1)
            print '\033[31;1m%s stopped! \033[0m' % service_name
        else:
            print '\033[31;1mCannot stop %s service successfully,please manually kill the pid!\033[0m' % service_name
    else:
        print 'Service is not running...,nothing to kill! '
def service_status(service_name):
    # Return 'Running' if any process matching *service_name* appears in
    # `ps -ef` output, 'Dead' otherwise. The "not running" case is detected
    # via the IndexError raised by split()[0] on empty ps output -- fragile
    # but deliberate here. Also prints a coloured status line either way.
    cmd = "ps -ef |grep %s|grep -v grep |awk '{print $2}'" % service_name
    result = os.popen(cmd).read().strip()
    try:
        # first PID found is enough to declare the service alive
        service_pid = result.split()[0]
        if service_pid:
            print "\033[32;1m%s monitor service is running...\033[0m" % service_name
            return "Running"
    except IndexError:
        print "\033[31;1m%s service is not running....\033[0m" % service_name
        return "Dead"
try:
    # CLI dispatcher: apply start/stop/status to all four TriAquae services.
    if sys.argv[1] == 'start':
        status_monitor(30)
        snmp_monitor()
        shellinabox()
        wsgiserver()
    elif sys.argv[1] == 'stop':
        stop_service(snmp_monitor_script)
        stop_service(status_check_script)
        stop_service('shellinaboxd')
        stop_service('runwsgiserver')
    elif sys.argv[1] == 'status':
        service_status(snmp_monitor_script)
        service_status(status_check_script)
        service_status('shellinaboxd')
        service_status('runwsgiserver')
except IndexError:
    # no sub-command supplied on the command line
    print 'No argument detected!\nUse: stop|start|status'
| [
"stefan_bo@163.com"
] | stefan_bo@163.com |
3014be40eabec0a1d1eeadd68d766eda03badafa | 6e42ce3512cefa970163bb105a736eba5c9bf902 | /cw/cw_9/cw_9_1_1.py | fa811bd2a26807f7c1b7471c3b21ca19b929c6d0 | [] | no_license | SKO7OPENDRA/gb-algorithm | 32f483e095b57d8c71cd5de9fa9bd2c00f0b6bc1 | 366ae8e58beae267d84baf8a28e6d4055e3a5bdb | refs/heads/master | 2020-12-26T22:40:34.640445 | 2020-02-27T16:16:44 | 2020-02-27T16:16:44 | 237,667,396 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 658 | py | # Что такое дерево
# Классификация дерева
# Создание деревьев в Python
# Самый простой способ создать дерево - создать свой собственный класс
from binarytree import tree, bst, Node, build
class MyNode:
    """Minimal binary-tree node: a payload plus optional left/right children."""

    def __init__(self, data, left=None, right=None):
        # payload and child links (children default to absent)
        self.data, self.left, self.right = data, left, right
# Demo of the `binarytree` package: two random trees, then a hand-built one.
a = tree(height=4, is_perfect=False)
print(a)
b = bst(height=4, is_perfect=True)
print(b)
# Hand-built tree rooted at 7.
c = Node(7)
c.left = Node(3)
c.right = Node (1)
# NOTE(review): this reassignment discards the Node(3) attached just above --
# confirm whether a deeper position (e.g. c.left.left) was intended.
c.left = Node(5)
c.right.left = Node(9)
c.right.right = Node(13)
print(c)
"58439768+SKO7OPENDRA@users.noreply.github.com"
] | 58439768+SKO7OPENDRA@users.noreply.github.com |
83715fa1d47f3fc83e0b62ba1acc085c58e59ecd | 40c2bce56832d97797c115f60d1e0459fd4ebf93 | /Eclipse_Project_2/Single_Ton_Pattern_Class/test_exampleOne.py | dc3a8820461af7567fb646cf3d32ba7fabdd7054 | [] | no_license | amanoj319319319/Eclipse_Python_LastSeleniumTest | 0be2e7f615160248f329b4df0e9d109612b29560 | 4d0978e4c2dfe9c3a9d4b429f7ff6340278c0252 | refs/heads/master | 2023-04-27T09:14:38.726807 | 2021-05-19T08:18:40 | 2021-05-19T08:18:40 | 267,038,244 | 0 | 0 | null | 2021-05-19T08:17:45 | 2020-05-26T12:35:36 | Python | UTF-8 | Python | false | false | 2,889 | py | #working very fine
'''
import pytest
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time
@pytest.mark.usefixtures("setup")
class TestExampleOne:
def test_title(self):
ele=self.driver.find_element_by_id("name")
ele.send_keys("Test Python")
time.sleep(5)
def test_hide(self):
ele2=self.driver.find_element_by_id("displayed-text")
ele2.send_keys("Hai Manoj")
time.sleep(5)
'''
#working very fine
'''
import pytest
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time
@pytest.mark.usefixtures("setup")
class TestExampleOne:
@pytest.mark.run(order=2)
def test_title(self):
ele=self.driver.find_element_by_id("name")
ele.send_keys("Test Python")
time.sleep(5)
print("The title of the page is:-", self.driver.title)
@pytest.mark.run(order=1)
def test_hide(self):
ele2=self.driver.find_element_by_id("displayed-text")
ele2.send_keys("Hai Manoj")
time.sleep(5)
print ("The title of the page is:-",self.driver.title)
'''
#How to run it on the console
#pytest -v -s C:\Users\Manoj\PycharmProjects\LastSeleniumTest\Single_Ton_Pattern_Class\test_exampleOne.py
#working very fine
import pytest
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time
import unittest
@pytest.mark.usefixtures("setup")
class TestExampleOne(unittest.TestCase):
@pytest.mark.run(order=2)
def test_title(self):
ele=self.driver.find_element_by_id("name")
ele.send_keys("Test Python")
time.sleep(5)
print("The title of the page is:-", self.driver.title)
@pytest.mark.run(order=1)
def test_hide(self):
ele2=self.driver.find_element_by_id("displayed-text")
ele2.send_keys("Hai Manoj")
time.sleep(5)
print ("The title of the page is:-",self.driver.title)
hide_button=self.driver.find_element_by_id("hide-textbox")
hide_button.click()
text_box=self.driver.find_element_by_id("displayed-text")
print ("is hide-button displayes:-",text_box.is_displayed())
self.driver.refresh()
@pytest.mark.run(order=3)
def test_another(self):
self.driver.get("https://letskodeit.teachable.com/")
login=self.driver.find_element_by_xpath("//*[@id='navbar']/div/div/div/ul/li[2]/a")
login.click()
time.sleep(6)
print ("Title of the facebook page is:-",self.driver.title)
| [
"a.manoj16@gmail.com"
] | a.manoj16@gmail.com |
5f9b268af9cb8089a970308ee8aa501616c68bbc | 6d80ce7a1f44ddf5741fd190ddfe0d9be8e5f162 | /model/detection_model/PSENet/util/__init__.py | 0851e4c84d2992a68efd02e1726176aecf9e3fbb | [
"MIT"
] | permissive | dun933/FudanOCR | dd8830ca4b8ebb08acd31326fcf5aa3c961886a0 | fd79b679044ea23fd9eb30691453ed0805d2e98b | refs/heads/master | 2021-04-03T19:50:47.646099 | 2020-03-16T08:43:59 | 2020-03-16T08:43:59 | 248,391,401 | 1 | 0 | MIT | 2020-03-19T02:23:11 | 2020-03-19T02:23:10 | null | UTF-8 | Python | false | false | 1,335 | py | import log
import dtype
# import plt
import np
import img
_img = img
import dec
import rand
import mod
import proc
# import test
import neighbour as nb
#import mask
import str_ as str
import io as sys_io
import io_ as io
import feature
import thread_ as thread
import caffe_ as caffe
# import tf
import cmd
import ml
import sys
import url
from .misc import *
from .logger import *
# log.init_logger('~/temp/log/log_' + get_date_str() + '.log')
def exit(code = 0):
    """Terminate the process with exit status *code* (default 0).

    Bug fix: the previous implementation called ``sys.exit(0)``
    unconditionally, silently discarding the ``code`` argument.
    """
    sys.exit(code)
is_main = mod.is_main
init_logger = log.init_logger
def sit(img, path = None, name = ""):
if path is None:
_count = get_count();
path = '~/temp/no-use/images/%s_%d_%s.jpg'%(log.get_date_str(), _count, name)
if type(img) == list:
plt.show_images(images = img, path = path, show = False, axis_off = True, save = True)
else:
plt.imwrite(path, img)
return path
_count = 0;
def get_count():
    # Increment and return the module-level counter used to make the
    # auto-generated temp-file names unique within a run.
    global _count;
    _count += 1;
    return _count
def cit(img, path = None, rgb = True, name = ""):
_count = get_count();
if path is None:
img = np.np.asarray(img, dtype = np.np.uint8)
path = '~/temp/no-use/%s_%d_%s.jpg'%(log.get_date_str(), _count, name)
_img.imwrite(path, img, rgb = rgb)
return path
def argv(index):
    # Convenience accessor for the index-th command-line argument.
    return sys.argv[index]
| [
"576194329@qq.com"
] | 576194329@qq.com |
1ccd46595657e4e3591261fd55aab390e27ec8a7 | bc68b28995103a45c5050a418a009f5f0c075bdc | /Rosalind1/Prob26/deBruijn.py | 201c46d46d9c2452c65c16307a2d005cd80b2a5e | [] | no_license | kedarpujara/BioinformaticsAlgorithms | 71ca191c18ff8260d9edcd594c337b282b2a7188 | 4a8919d03af767704525e8a5610af5b811d5c873 | refs/heads/master | 2020-03-13T08:41:45.361941 | 2018-04-25T18:39:00 | 2018-04-25T18:39:00 | 131,048,693 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,840 | py | class Vertex:
def __init__(self, n):
self.name = n
self.neighbors = list()
def add_neighbor(self, v):
if v not in self.neighbors:
self.neighbors.append(v)
self.neighbors.sort()
class Graph:
    """Undirected graph: maps vertex names to Vertex objects.

    Bug fix: ``vertices`` used to be a *class-level* dict, so every Graph
    instance shared (and mutated) the same mapping. It is now created per
    instance in ``__init__``.
    """

    def __init__(self):
        # name -> Vertex, owned by this instance
        self.vertices = {}

    def add_vertex(self, vertex):
        """Add *vertex* if it is a Vertex with an unseen name; return success."""
        if isinstance(vertex, Vertex) and vertex.name not in self.vertices:
            self.vertices[vertex.name] = vertex
            return True
        else:
            return False

    def add_edge(self, u, v):
        """Connect existing vertices named *u* and *v*; return success."""
        if u in self.vertices and v in self.vertices:
            self.vertices[u].add_neighbor(v)
            self.vertices[v].add_neighbor(u)
            return True
        else:
            return False

    def print_graph(self):
        """Print each vertex name with its neighbor list, sorted by name."""
        for key in sorted(list(self.vertices.keys())):
            print(key + str(self.vertices[key].neighbors))
def suffixs(string):
    """Return *string* without its first character (its length-1 suffix)."""
    return string[1:]
def prefixs(string):
    """Return *string* without its last character (its length-1 prefix)."""
    return string[:-1]
def deBruijnOld(patterns):
    """Pair each pattern with the first pattern whose prefix matches its
    suffix; return the parallel (source, destination) lists."""
    lefts = []
    rights = []
    for src in patterns:
        overlap = src[1:]              # suffix of src (inlined suffixs())
        for dst in patterns:
            if overlap == dst[:-1]:    # prefix of dst (inlined prefixs())
                lefts.append(src)
                rights.append(dst)
                break                  # only the first match counts
    return lefts, rights
def deBruijn(inputString, k):
    # Slide over consecutive (k-1)-mers of inputString, collecting
    # rightList = each window and leftList = the window shifted by one;
    # returned as (rightList, leftList).
    leftList = []
    rightList = []
    multiList = []  # NOTE(review): never populated or returned -- dead variable
    for i in range(len(inputString)-k+1):
        string1 = inputString[i:i+k-1]
        string2 = inputString[i+1:i+k]
        # NOTE(review): this is a *substring* (not equality) test against every
        # window seen so far; on the first hit it builds newString and aborts
        # the loop, but newString is then discarded. Looks like unfinished
        # repeat handling -- confirm intended semantics before relying on it.
        if any(string1 in s for s in rightList):
            newString = inputString[i:i+k-1] + inputString[i+1:i+k]
            break
        leftList.append(string2)
        rightList.append(string1)
    return rightList, leftList
def main():
    # Read k (line 1) and the text (line 2) from input.txt, build the
    # adjacency lists with deBruijn(), and write them to output.txt as
    # "left -> right" lines.
    # NOTE(review): neither file handle is closed explicitly; output.txt may
    # not be flushed until interpreter exit -- consider `with open(...)`.
    inputFile = open("input.txt","r")
    k = int(inputFile.readline().strip())
    inputString = inputFile.readline().strip()
    leftList, rightList = deBruijn(inputString, k)
    output = open("output.txt", 'w')
    for i in range(len(leftList)):
        output.write(leftList[i] + " -> " + rightList[i] + "\n")
main() | [
"kedarpujara@gmail.com"
] | kedarpujara@gmail.com |
2cac0ac3790b90127a38572fbc918208c45b1259 | 600df3590cce1fe49b9a96e9ca5b5242884a2a70 | /buildtools/third_party/libc++abi/libc++abi.gyp | c3e6c07e1f9e77e7a1da0892ab9edadcfe7a0916 | [
"NCSA",
"MIT",
"BSD-3-Clause"
] | permissive | metux/chromium-suckless | efd087ba4f4070a6caac5bfbfb0f7a4e2f3c438a | 72a05af97787001756bae2511b7985e61498c965 | refs/heads/orig | 2022-12-04T23:53:58.681218 | 2017-04-30T10:59:06 | 2017-04-30T23:35:58 | 89,884,931 | 5 | 3 | BSD-3-Clause | 2022-11-23T20:52:53 | 2017-05-01T00:09:08 | null | UTF-8 | Python | false | false | 1,576 | gyp | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'libc++abi',
'type': 'static_library',
'toolsets': ['host', 'target'],
'dependencies=': [],
'sources': [
'trunk/src/abort_message.cpp',
'trunk/src/cxa_aux_runtime.cpp',
'trunk/src/cxa_default_handlers.cpp',
'trunk/src/cxa_demangle.cpp',
'trunk/src/cxa_exception.cpp',
'trunk/src/cxa_exception_storage.cpp',
'trunk/src/cxa_guard.cpp',
'trunk/src/cxa_handlers.cpp',
'trunk/src/cxa_new_delete.cpp',
'trunk/src/cxa_personality.cpp',
'trunk/src/cxa_thread_atexit.cpp',
'trunk/src/cxa_unexpected.cpp',
'trunk/src/cxa_vector.cpp',
'trunk/src/cxa_virtual.cpp',
'trunk/src/exception.cpp',
'trunk/src/private_typeinfo.cpp',
'trunk/src/stdexcept.cpp',
'trunk/src/typeinfo.cpp',
],
'include_dirs': [
'trunk/include',
'../libc++/trunk/include'
],
'variables': {
'clang_warning_flags': [
# http://llvm.org/PR25978
'-Wno-unused-function',
],
},
'cflags': [
'-fPIC',
'-fstrict-aliasing',
'-nostdinc++',
'-pthread',
'-std=c++11',
],
'cflags_cc!': [
'-fno-exceptions',
'-fno-rtti',
],
'cflags!': [
'-fvisibility=hidden',
],
},
]
}
| [
"enrico.weigelt@gr13.net"
] | enrico.weigelt@gr13.net |
6e082aea8360e7905c61b42c01eb77a38ed8b56c | a6e4a6f0a73d24a6ba957277899adbd9b84bd594 | /sdk/python/pulumi_azure_native/documentdb/v20191212/list_database_account_keys.py | f639451de463da06e7decaaaabefad7426574541 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | MisinformedDNA/pulumi-azure-native | 9cbd75306e9c8f92abc25be3f73c113cb93865e9 | de974fd984f7e98649951dbe80b4fc0603d03356 | refs/heads/master | 2023-03-24T22:02:03.842935 | 2021-03-08T21:16:19 | 2021-03-08T21:16:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,464 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'ListDatabaseAccountKeysResult',
'AwaitableListDatabaseAccountKeysResult',
'list_database_account_keys',
]
# Auto-generated by the Pulumi SDK generator (see file header); manual edits
# will be overwritten on regeneration.
@pulumi.output_type
class ListDatabaseAccountKeysResult:
    """
    The access keys for the given database account.
    """
    def __init__(__self__, primary_master_key=None, primary_readonly_master_key=None, secondary_master_key=None, secondary_readonly_master_key=None):
        # Each value is type-checked (str) when present, then stored via
        # pulumi.set, which backs the @pulumi.getter properties below.
        if primary_master_key and not isinstance(primary_master_key, str):
            raise TypeError("Expected argument 'primary_master_key' to be a str")
        pulumi.set(__self__, "primary_master_key", primary_master_key)
        if primary_readonly_master_key and not isinstance(primary_readonly_master_key, str):
            raise TypeError("Expected argument 'primary_readonly_master_key' to be a str")
        pulumi.set(__self__, "primary_readonly_master_key", primary_readonly_master_key)
        if secondary_master_key and not isinstance(secondary_master_key, str):
            raise TypeError("Expected argument 'secondary_master_key' to be a str")
        pulumi.set(__self__, "secondary_master_key", secondary_master_key)
        if secondary_readonly_master_key and not isinstance(secondary_readonly_master_key, str):
            raise TypeError("Expected argument 'secondary_readonly_master_key' to be a str")
        pulumi.set(__self__, "secondary_readonly_master_key", secondary_readonly_master_key)
    @property
    @pulumi.getter(name="primaryMasterKey")
    def primary_master_key(self) -> str:
        """
        Base 64 encoded value of the primary read-write key.
        """
        return pulumi.get(self, "primary_master_key")
    @property
    @pulumi.getter(name="primaryReadonlyMasterKey")
    def primary_readonly_master_key(self) -> str:
        """
        Base 64 encoded value of the primary read-only key.
        """
        return pulumi.get(self, "primary_readonly_master_key")
    @property
    @pulumi.getter(name="secondaryMasterKey")
    def secondary_master_key(self) -> str:
        """
        Base 64 encoded value of the secondary read-write key.
        """
        return pulumi.get(self, "secondary_master_key")
    @property
    @pulumi.getter(name="secondaryReadonlyMasterKey")
    def secondary_readonly_master_key(self) -> str:
        """
        Base 64 encoded value of the secondary read-only key.
        """
        return pulumi.get(self, "secondary_readonly_master_key")
class AwaitableListDatabaseAccountKeysResult(ListDatabaseAccountKeysResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListDatabaseAccountKeysResult(
primary_master_key=self.primary_master_key,
primary_readonly_master_key=self.primary_readonly_master_key,
secondary_master_key=self.secondary_master_key,
secondary_readonly_master_key=self.secondary_readonly_master_key)
def list_database_account_keys(account_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListDatabaseAccountKeysResult:
"""
The access keys for the given database account.
:param str account_name: Cosmos DB database account name.
:param str resource_group_name: Name of an Azure resource group.
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:documentdb/v20191212:listDatabaseAccountKeys', __args__, opts=opts, typ=ListDatabaseAccountKeysResult).value
return AwaitableListDatabaseAccountKeysResult(
primary_master_key=__ret__.primary_master_key,
primary_readonly_master_key=__ret__.primary_readonly_master_key,
secondary_master_key=__ret__.secondary_master_key,
secondary_readonly_master_key=__ret__.secondary_readonly_master_key)
| [
"noreply@github.com"
] | MisinformedDNA.noreply@github.com |
2337d1dc5879ef718152539d26f659496418ac42 | 7b75735b6d894d0c3bb40d657dc4c4eb436716c1 | /decompiled_code/library/encodings/mac_cyrillic.py | 107b50c3a389093639823c79c0bad8ba23a86f8b | [] | no_license | anton-shipulin/TRISIS-TRITON-HATMAN | fe54fb994214e35f80d39c26fbc5289f0b57b2bd | 1b167a9414b479331fb35a04eace75bb0e736005 | refs/heads/master | 2020-04-06T23:57:38.833044 | 2018-11-16T22:12:01 | 2018-11-16T22:12:01 | 157,886,273 | 3 | 0 | null | 2018-11-16T15:30:59 | 2018-11-16T15:30:58 | null | UTF-8 | Python | false | false | 2,383 | py | # uncompyle6 version 2.14.1
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.12 (default, Nov 19 2016, 06:48:10)
# [GCC 5.4.0 20160609]
# Embedded file name: encodings\mac_cyrillic.pyc
# Compiled at: 2016-06-25 21:46:06
""" Python Character Mapping Codec mac_cyrillic generated from 'MAPPINGS/VENDORS/APPLE/CYRILLIC.TXT' with gencodec.py.
"""
import codecs
class Codec(codecs.Codec):
def encode(self, input, errors='strict'):
return codecs.charmap_encode(input, errors, encoding_table)
def decode(self, input, errors='strict'):
return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input, self.errors, encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input, self.errors, decoding_table)[0]
class StreamWriter(Codec, codecs.StreamWriter):
pass
class StreamReader(Codec, codecs.StreamReader):
pass
def getregentry():
return codecs.CodecInfo(name='mac-cyrillic', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter)
decoding_table = u'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\x7f\u0410\u0411\u0412\u0413\u0414\u0415\u0416\u0417\u0418\u0419\u041a\u041b\u041c\u041d\u041e\u041f\u0420\u0421\u0422\u0423\u0424\u0425\u0426\u0427\u0428\u0429\u042a\u042b\u042c\u042d\u042e\u042f\u2020\xb0\u0490\xa3\xa7\u2022\xb6\u0406\xae\xa9\u2122\u0402\u0452\u2260\u0403\u0453\u221e\xb1\u2264\u2265\u0456\xb5\u0491\u0408\u0404\u0454\u0407\u0457\u0409\u0459\u040a\u045a\u0458\u0405\xac\u221a\u0192\u2248\u2206\xab\xbb\u2026\xa0\u040b\u045b\u040c\u045c\u0455\u2013\u2014\u201c\u201d\u2018\u2019\xf7\u201e\u040e\u045e\u040f\u045f\u2116\u0401\u0451\u044f\u0430\u0431\u0432\u0433\u0434\u0435\u0436\u0437\u0438\u0439\u043a\u043b\u043c\u043d\u043e\u043f\u0440\u0441\u0442\u0443\u0444\u0445\u0446\u0447\u0448\u0449\u044a\u044b\u044c\u044d\u044e\u20ac'
encoding_table = codecs.charmap_build(decoding_table) | [
"ICSrepo"
] | ICSrepo |
7d2f40e4bd6adcc02a6d9b06df433d2786224b34 | 78a179ad824b6c70a33e0c6885e8d15e91cdbbdc | /correlation_sgd.py | a45a05f571ef8b9b87fc18bf8f728e5ced0288e2 | [] | no_license | bio-ontology-research-group/pgsim | 50ea74c9a0505e1a737a62441833aa6e3f2a8b8c | 4461828923431235fb6bcd65025aff6522fd267b | refs/heads/master | 2020-04-02T02:38:49.243292 | 2017-02-02T11:34:44 | 2017-02-02T11:34:44 | 49,642,348 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,306 | py | #!/usr/bin/env python
import os
import sys
import numpy as np
from scipy.stats import spearmanr, pearsonr
from data import (
get_total_average_sims,
get_diff_average_sims,
DATA_ROOT)
def get_correlations(measures, filename):
'''
Calculates spearman and pearson correlations for
annotation size with mean and annotation size with variance
'''
corrs = list()
# annots, mean, var = get_total_average_sims(measures, filename)
annots, mean, var = get_diff_average_sims(measures, filename)
r1, p1 = spearmanr(annots, mean)
r2, p2 = spearmanr(annots, var)
corrs.append((r1, p1, r2, p2))
r1, p1 = pearsonr(annots, mean)
r2, p2 = pearsonr(annots, var)
corrs.append((r1, p1, r2, p2))
return corrs
def main(*args, **kwargs):
if len(args) < 3:
raise Exception('Please provide measures folder and filename')
measures = args[1]
filename = args[2]
basename = os.path.basename(filename)
name = os.path.splitext(basename)[0]
corrs = get_correlations(measures, filename)
with open(DATA_ROOT + measures + '/' + name + '.diff.tsv', 'w') as f:
f.write('MEAN_CORR\tPVAL\tVAR_CORR\tPVAL\n')
for corr in corrs:
f.write('%f\t%f\t%f\t%f\n' % corr)
if __name__ == '__main__':
main(*sys.argv)
| [
"coolmaksat@gmail.com"
] | coolmaksat@gmail.com |
8b06cec79bca3f45e98d3df7c4466d5424a30695 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/62/usersdata/261/29578/submittedfiles/ex1.py | a8820f3dba06441ddcf5f5bb2dec3cd767f23e17 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 333 | py | # -*- coding: utf-8 -*-
from __future__ import division
a = int(input('Digite a: '))
b = int(input('Digite b: '))
c = int(input('Digite c: '))
#COMECE A PARTIR DAQUI!
delta = ((b*b) - (4*a*c))**(1/2)
if delta <0:
print (str("SRR"))
else:
x1 = (-b+delta)/2*a
x2 = (-b-delta)/2*a
print ("x1=",x1)
print ("x2=",x2)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
edda0fd39872ba56cf1cbc2084decebee6a9348f | 6737ca32fe093c6ac6b203fc71dbc177853bfaee | /curate_orthogene/scripts/calc_identity_from_orthologs.py | 5d3aa1e3d635a4b1c9a3209b01f684125c133c40 | [] | no_license | lhui2010/bundle | 56a47bcdd2d41718b51da8c8cf23ab43577dfb4e | e31c8f2f65260ceff110d07b530b67e465e41800 | refs/heads/master | 2022-08-31T17:12:26.081984 | 2022-08-03T08:37:38 | 2022-08-03T08:37:38 | 74,002,474 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,477 | py | #!/usr/bin/env python
import logging
import os
import sys
from multiprocessing import Pool
import argparse
import subprocess
#TODO
#Remove global variable
#OptParser
usage = """Calculate Identity from orthologs
Usage:
{} -o workdir REF_TAG Ortho_File QRY.fa REF.fa >A188.identity
""".format(__file__)
parser = argparse.ArgumentParser()
parser.add_argument("REF_TAG", help="The unique keyword in gene IDs of reference genes")
parser.add_argument("OrthoFile", help="The tab deliminated ortholog file of orthologs: Eg: A188_A188G12312 B73_Zm00001d001012")
parser.add_argument("QRY_FA", help="Fasta of query fasta files")
parser.add_argument("REF_FA", help="Fasta of reference fasta files")
parser.add_argument("-o", "--output_dir", default='workdir',
help="specifying output directory")
parser.add_argument("-t", "--threads", default=55, type=int,
help="specifying threads")
args = parser.parse_args()
WORKDIR = args.output_dir
THREADS = args.threads
REF_ID = args.REF_TAG
ORTHO_FILE = args.OrthoFile
QRY_FA = args.QRY_FA
REF_FA = args.REF_FA
#print([WORKDIR, THREADS, ORTHO_FILE, REF_ID, QRY_FA, REF_FA])
#exit()
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) + "/"
#Required several scripts:
#1. /lustre/home/liuhui/bin/lh_bin/select_fasta.pl
#2. muscle in path dir
#3. msa2identity.py in current dir
#Input example
#python parser_ortho.py final_ortho.txt.add_info.format A188.pep B73.pep
def os_run_test(input_str, qry_fa = "A188.pep", ref_fa = "B73.pep", workdir="workdir"):
print("\t".join([input_str, qry_fa, ref_fa]))
#def os_run(input_str, qry_fa, ref_fa, workdir="workdir"):
#def os_run(input_str, qry_fa = "A188.pep", ref_fa = "B73.pep", workdir="workdir"):
def os_run(input_str, qry_fa = QRY_FA, ref_fa = REF_FA, workdir=WORKDIR):
mylist = input_str.rstrip().split()
qry_name = mylist[0]
ref_names = mylist[1].split(',')
output_fa = os.path.join(workdir, qry_name + ".fa")
output_aln = os.path.join(workdir, qry_name + ".aln")
output_identity = os.path.join(workdir, qry_name + ".identity")
logging.warning("{} {} {} > {} ".format("perl " + SCRIPT_DIR + "select_fasta.pl", qry_name, qry_fa, output_fa))
os.system("{} {} {} > {} ".format("perl " + SCRIPT_DIR + "select_fasta.pl", qry_name, qry_fa, output_fa))
for ref_name in ref_names:
logging.warning(ref_name)
os.system("{} {} {} >> {} ".format("perl " + SCRIPT_DIR + "select_fasta.pl", ref_name, ref_fa, output_fa))
os.system("{} {} > {} ".format("muscle -in ", output_fa, output_aln))
os.system("{} {} {} > {} ".format("python " + SCRIPT_DIR + "msa2identity.py ", REF_ID, output_aln, output_identity))
if __name__ == "__main__":
os.system("mkdir -p {}".format(WORKDIR))
file_lines = []
with open(ORTHO_FILE) as fh:
file_lines = fh.readlines()
#print(file_lines)
# qry_fa = sys.argv[2]
# ref_fa = sys.argv[3]
with Pool(THREADS) as p:
#p.apply_async(os_run, (file_lines, qry_fa, ref_fa,))
# p.map(os_run_test, file_lines)
p.map(os_run, file_lines)
output = subprocess.check_output("for iden_ite in {}/*identity; do sort -k4,4g $iden_ite |sed -n '1p;$p' ; done".format(WORKDIR), shell=True)
print(output.decode())
# os.system("touch syn.identity && rm syn.identity")
# output = os.system("for iden_ite in {}/*identity; do sort -k4,4g ${iden_ite} |sed -n '1p;$p' >>syn.identity; done".format(WORKDIR))
| [
"lhui2010@gmail.com"
] | lhui2010@gmail.com |
77674c66ad7d3b0b071716b52e09cf66241e1953 | 7abbcd16dcf2e639e53665d50ec113e1374b79eb | /checkout/urls.py | c05c00460f8ef6e05ed8dd59477e3f5d3f254c8b | [] | no_license | srajsonu/ROIIM-Assignment-Paysafe | ab6f160641adb69cef2f78bde594322f286ff089 | 1d9586e29f1871e4e9577ff2befd594c8a9cbbe4 | refs/heads/main | 2023-01-04T23:42:53.403941 | 2020-10-31T17:17:57 | 2020-10-31T17:17:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | from django.urls import path
from . import views
app_name = 'checkout'
urlpatterns = [
path('checkout/', views.checkout, name='checkout'),
path('payment_successful/', views.payment_successful, name='payment_successful')
]
| [
"srajsonu02@gmail.com"
] | srajsonu02@gmail.com |
b29c08e430263fddd494f44cf1aa620f7cdf25de | 5b70fbd53b534306c146ffb98a0f99d2343a948f | /src/Python/Problem99.py | 8e774a09fb35eba85b4bcfd02776deb92c9fcb7f | [] | no_license | aniruddhamurali/Project-Euler | 1f4ff3aa1e9c4efbc2a85026821e19a28b5edf90 | 408b3098fbc98ff3954679602c0468ddb56ea0ac | refs/heads/master | 2020-03-20T23:07:22.178103 | 2018-07-27T01:40:46 | 2018-07-27T01:40:46 | 137,830,476 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | def max_exp():
name = "Problem99.txt"
file = open(name, 'r')
maxNum = None
track = 1
for line in file:
comma = line.find(',')
base = line[:comma]
exp = line[comma+1:]
base = float(base)
exp = (int(exp))/700000
num = float(base**exp)
if maxNum == None or num > maxNum:
maxNum = num
maxLine = track
track = track + 1
return maxLine
| [
"aniruddha.murali@gmail.com"
] | aniruddha.murali@gmail.com |
4c7b091a0abe71c1c798652c3b184644b1afa2b9 | 325bee18d3a8b5de183118d02c480e562f6acba8 | /india/india_c/india/ScriptDir/Initialization.py | 6c7a76d5cc4ccf59d06d3fe9407de58d01d1dac9 | [] | no_license | waynecanfly/spiderItem | fc07af6921493fcfc21437c464c6433d247abad3 | 1960efaad0d995e83e8cf85e58e1db029e49fa56 | refs/heads/master | 2022-11-14T16:35:42.855901 | 2019-10-25T03:43:57 | 2019-10-25T03:43:57 | 193,424,274 | 4 | 0 | null | 2022-11-04T19:16:15 | 2019-06-24T03:00:51 | Python | UTF-8 | Python | false | false | 508 | py | #coding:utf-8
import shutil
import os
class Initialization(object):
def InitializeMain(self):
shutil.rmtree('D:\item\OPDCMS\listed company update\india\data\zip/full')
shutil.rmtree('D:\item\OPDCMS\listed company update\india\data\pdf/full')
print("*"*93)
for i in range(2):
print("*" + '\t'*23 + "*")
print("*" + '\t'*10 + '初始化完成!' + '\t'*11 + "*")
for i in range(2):
print("*" + '\t'*23 + "*")
print("*" * 93)
| [
"1370153124@qq.com"
] | 1370153124@qq.com |
de126b11e439484e7edf1502679f25c9f872f105 | fba9dd10028ae22eadc6da307f6df66d3c658d2e | /Modulo02/Desafio037-Condicoes-Aninhadas.py | 4e3fbc6f1abb423c280f4450d0de158de58b86eb | [] | no_license | GabrielCardoso2019/Curso-de-Python | bbf52330236a5847e6c48fe8ed01efd767a2f92e | 2e3b4066d72d0dc599aa68b8ee2ab2571324a372 | refs/heads/master | 2023-02-25T03:41:35.906086 | 2021-02-01T17:31:57 | 2021-02-01T17:31:57 | 335,028,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 626 | py | num = int(input('Digite um número inteiro: '))
print('-=-' * 20)
print('''Escolha uma das bases para conversão:
[ 1 ] Converter para BINÁRIO
[ 2 ] Converter para OCTAL
[ 3 ] Converter para HEXADECIMAL''')
opcao = int(input('\nEscolha uma opção: '))
print('-=-' * 20)
if opcao == 1:
print('{} converção para BINÁRIO é igual a {}'.format(num, bin(num)[2:]))
elif opcao == 2:
print('{} converção para OCTAL é igual a {}'.format(num, oct(num)[2:]))
elif opcao == 3:
print('{} converção para HEXADECIMAL é igual a {}'.format(num, hex(num)[2:]))
else:
print('Opção inválida. Tente novamente!')
| [
"gabrielcardososs2016@hotmail.com"
] | gabrielcardososs2016@hotmail.com |
d5ff0cf688e5fbed28f798dfc48819ae4b88c02e | 53d4a3f2ec628c5482c78580064b5a36f85b5a70 | /spider/__init__.py | fb47e17d5503683245603b58b54feaf52009f917 | [
"BSD-2-Clause"
] | permissive | yaoxiaokui/PSpider | e4e883b4c01abd1f40e99473e1164b9f8508799f | 6b1ea034541ea0317aae48d800e348a9a90ff798 | refs/heads/master | 2020-03-21T12:29:21.859903 | 2018-06-13T08:38:37 | 2018-06-13T09:20:45 | 138,554,987 | 0 | 1 | BSD-2-Clause | 2018-06-25T06:55:08 | 2018-06-25T06:55:07 | null | UTF-8 | Python | false | false | 284 | py | # _*_ coding: utf-8 _*_
"""
define WebSpider, WebSpiderDist, and also define utilities and instances for web_spider
"""
__version__ = "1.3.0"
from .utilities import *
from .instances import Fetcher, Parser, Saver, Proxieser
from .concurrent import TPEnum, WebSpider, WebSpiderDist
| [
"qixianhu@qq.com"
] | qixianhu@qq.com |
1736498bb96ef9719b6fdac3e8f5e3b846b361c3 | 947ccd64444a225caec6811a74747bd070a3cfe6 | /shop/models.py | 9f9b9617feb48ca113d8c2ff15c2855bf8743cf7 | [] | no_license | mishaukr7/eschools | a6bca6118f62af6d90badd77848311a7bc3964cf | 5dd5bf07901a6871470b54df763164dda67c5f19 | refs/heads/master | 2020-04-10T00:50:41.555444 | 2019-01-08T09:14:20 | 2019-01-08T09:14:20 | 160,697,869 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | from django.db import models
# Create your models here.
class News(models.Model):
title = models.TextField(max_length=400)
content = models.TextField(max_length=4096)
video = models.URLField(blank=True, null=True)
| [
"mishaukr22@gmail.com"
] | mishaukr22@gmail.com |
9d8166fa1ed04426e1ea570b534c19fd26448bfd | f636e71f45170e6cf197bcfc14a50af45fd828ed | /Lesson 7/examples/example1.py | 50389e99a91e8bbac016e25a61b26af0bf0949bc | [] | no_license | mcgokable/Cources | 872640c62fdb82f19ec7f5d20700fed85c4a3447 | 960ef7d4e72af0ec29c9a78220b44873f2ef2d35 | refs/heads/master | 2022-04-06T23:35:30.477455 | 2019-11-29T13:22:40 | 2019-12-05T07:31:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | import smtplib
from email.message import EmailMessage
server = smtplib.SMTP()
server.connect('localhost')
if __name__ == '__main__':
message = EmailMessage()
message['From'] = input('Email From: ')
message['To'] = input('Email To: ')
message['Subject'] = input('Subject: ')
message.set_content(input('Message: '))
server.send_message(
msg=message
)
server.close()
| [
"19941510metalhead@gmail.com"
] | 19941510metalhead@gmail.com |
4fe960c3b3305e438c3d415d14a910f867aea18b | dbb32a7d5b96a94533b27a6ccf2474c660a863b7 | /containers/user/sources/utils/types/component/identity.py | fcfde17dd53b6c70d4fcd4381112bc7db3a823c4 | [] | no_license | ankurhcu/FogBus2 | 772e8346c5e01e2aa8a02da9ef91fd696dd587a7 | 2cefabdd1d131fc8e9015ca31d414665e6014a69 | refs/heads/main | 2023-08-07T15:33:54.039724 | 2021-09-21T05:02:49 | 2021-09-21T05:02:49 | 410,610,212 | 1 | 0 | null | 2021-09-26T16:57:23 | 2021-09-26T16:57:22 | null | UTF-8 | Python | false | false | 2,321 | py | from .role import ComponentRole
from ..basic import Address
class ComponentIdentity:
def __init__(
self,
addr: Address,
role: ComponentRole = ComponentRole.DEFAULT,
hostID: str = None,
componentID: str = None,
name: str = None,
nameLogPrinting: str = None,
nameConsistent: str = None):
self.role = role
self.addr = addr
if componentID is None:
self.componentID = '?'
else:
self.componentID = componentID
if hostID is None:
self.hostID = self.generateHostID()
else:
self.hostID = hostID
if name is None:
self.name = '%s-%s_%s-%d' % (
self.role.value, self.componentID, addr[0], addr[1])
else:
self.name = name
if nameLogPrinting is None:
self.nameLogPrinting = self.name
else:
self.nameLogPrinting = nameLogPrinting
if nameConsistent is None:
self.nameConsistent = '%s_%s' % (self.role.value, self.hostID)
else:
self.nameConsistent = nameConsistent
def generateHostID(self):
info = self.addr[0]
# return sha256(info.encode('utf-8')).hexdigest()
return info
@staticmethod
def getHostIDFromNameConsistent(nameConsistent: str):
return nameConsistent[-64:]
def setIdentities(
self,
addr: Address = None,
name: str = None,
componentID: str = None,
nameLogPrinting: str = None,
nameConsistent: str = None,
hostID: str = None):
if addr is not None:
self.addr = addr
if name is not None:
self.name = name
else:
self.name = '%s-%s_%s-%d' % (
self.role.value, self.componentID, self.addr[0], self.addr[1])
if componentID is not None:
self.componentID = componentID
if nameLogPrinting is not None:
self.nameLogPrinting = nameLogPrinting
else:
self.nameLogPrinting = self.name
if nameConsistent is not None:
self.nameConsistent = nameConsistent
if hostID is not None:
self.hostID = hostID
| [
"plocircle@live.com"
] | plocircle@live.com |
c4cfc248eac8fcc9673f45a9e0869b6854d36951 | 8eff7e195a9cb4aba3700ff933782240fc5dfacf | /context-managers/change_path_context.py | bd75c25b168780b2de840c23346872b737fa9309 | [] | no_license | ThiaguinhoLS/Code | 58ec668df799f10b245267c3184c138d8434878c | 8b3c6fb9eeb1479ccf92ae05ed578a9c44fa7138 | refs/heads/master | 2020-03-22T01:01:07.081909 | 2018-07-19T04:23:48 | 2018-07-19T04:23:48 | 139,278,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 360 | py | # -*- coding: utf-8 -*-
from contextlib import contextmanager
import os
@contextmanager
def change_path(path):
'''
Altera o path atual e depois do fechamento do contexto retorna o path
Como usar:
with change_path('../'):
do something
'''
actual = os.getcwd()
os.chdir(path)
yield
os.chdir(actual)
| [
"tthiaguinho638@gmail.com"
] | tthiaguinho638@gmail.com |
4c05bef3beda374eb0b8d163120cc24ec35bae39 | cbbdbdfa3d69a11de5dbd80f860986c97ec10b67 | /test/validate/test_base.py | 279edbf7dd38a809869867722912f8a8cb73961a | [
"MIT"
] | permissive | lokeshmeher/schema | 757cbc837c91f124774d3a1562ceccc255f17026 | 3c7478d27f87a2f1a7f2c2da67beced4a76704cc | refs/heads/master | 2021-06-04T18:50:42.461646 | 2016-02-24T04:15:04 | 2016-02-24T04:15:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,512 | py | # encoding: utf-8
from __future__ import unicode_literals
import re
from marrow.schema import Attribute, Container
from marrow.schema.compat import unicode
from marrow.schema.testing import ValidationTest
from marrow.schema.validate.base import *
class TestAlways(ValidationTest):
validator = Always().validate
valid = (None, False, True, 0, 1, 3.14, '', 'foo', [], ['bar'], {}, {'baz': 'diz'})
class TestNever(ValidationTest):
validator = Never().validate
invalid = TestAlways.valid
class TestTruthy(ValidationTest):
validator = Truthy(True).validate
valid = (True, 'Foo', 1, [None], (None, ), set("abc"))
invalid = (False, '', 0, [], tuple(), set())
class TestFalsy(ValidationTest):
validator = Falsy(True).validate
valid = TestTruthy.invalid
invalid = TestTruthy.valid
class TestRequired(ValidationTest):
validator = Required(True).validate
valid = (True, False, 0, 1, 'abc')
invalid = (None, [], '')
class TestMissing(ValidationTest):
validator = Missing(True).validate
valid = TestRequired.invalid
invalid = TestRequired.valid
class TestEmptyCallback(ValidationTest):
validator = Callback().validate
valid = TestTruthy.valid + TestTruthy.invalid
class TestSuccessCallback(ValidationTest):
validator = Callback(lambda V, v, x: v).validate
valid = TestEmptyCallback.valid
class TestFailureCallback(ValidationTest):
validator = Callback(lambda V, v, x: Concern("Uh, no.")).validate
invalid = TestSuccessCallback.valid
class TestCallbacks(object):
@Callback # Yes, you really can use it this way. Implies staticmethod.
def raises(validator, value, context):
raise Concern("Oh my no.")
assert isinstance(raises, Callback) # Let's make sure that worked...
def test_raises(self):
try:
self.raises.validate(None)
except Concern as e:
assert unicode(e) == "Oh my no."
else:
assert False, "Failed to raise a Concern."
class TestInAny(ValidationTest):
validator = In().validate
valid = TestAlways.valid
class TestInSimple(ValidationTest):
validator = In([1, 2, 3]).validate
valid = (1, 2, 3)
invalid = (None, 0, 4, 'bob')
class TestInDescriptive(ValidationTest):
validator = In([(1, "First"), (2, "Second"), (3, "Third")]).validate
valid = TestInSimple.valid
invalid = TestInSimple.invalid
class TestInCallback(ValidationTest):
validator = In(lambda: [1, 2, 3]).validate
valid = TestInSimple.valid
invalid = TestInSimple.invalid
class TestContains(object):
empty = Contains()
simple = Contains(27)
callback = Contains(lambda: 42)
def _do(self, validator):
assert validator.validate([1, 27, 42]) == [1, 27, 42]
try:
validator.validate([1, 2, 3])
except Concern as e:
assert unicode(e).startswith("Value does not contain: ")
else:
assert False, "Failed to raise a Concern."
def test_empty(self):
assert self.empty.validate([4, 20]) == [4, 20]
def test_simple(self):
self._do(self.simple)
def test_callback(self):
self._do(self.callback)
class TestLength(object):
empty = Length()
simple = Length(20)
callback = Length(lambda: 10)
rangeish = Length(slice(5, 15, 2)) # Yup, even step works here.
exact = Length(slice(32, 33)) # I.e. for an MD5 hash. L <= v < R
tupleish = Length((5, 15)) # Won't work for now. See TODO.
def test_empty(self):
assert self.empty.validate('') == ''
def _do(self, validator, good, bad):
assert validator.validate(good) == good
try:
validator.validate(bad)
except Concern as e:
pass
else:
assert False, "Failed to raise a Concern."
def test_simple(self):
self._do(self.simple, " " * 5, " " * 25)
self._do(self.simple, ('', ) * 5, None)
def test_callback(self):
self._do(self.callback, " " * 5, " " * 15)
def test_rangeish(self):
self._do(self.rangeish, " " * 7, " " * 8)
self._do(self.rangeish, " " * 11, " " * 4)
self._do(self.rangeish, " " * 11, " " * 27)
self._do(self.rangeish, ('', ) * 5, None)
def test_exact(self):
self._do(self.exact, " " * 32, " " * 31)
self._do(self.exact, " " * 32, " " * 33)
class TestRange(object):
empty = Range()
minonly = Range(5, None)
maxonly = Range(None, 5)
minmax = Range(5, 10)
odd = Range((2,6), (3,4))
callback = Range(None, lambda: 5)
def _do(self, validator, good, bad):
assert validator.validate(good) == good
try:
validator.validate(bad)
except Concern as e:
pass
else:
assert False, "Failed to raise a Concern."
def test_empty(self):
assert self.empty.validate('') == ''
def test_minimum(self):
self._do(self.minonly, 10, 3)
self._do(self.minmax, 5, 4)
def test_maximum(self):
self._do(self.maxonly, 5, 10)
self._do(self.minmax, 10, 11)
self._do(self.minmax, 10, 11)
self._do(self.callback, 5, 6)
def test_odd(self):
self._do(self.odd, (2,7), (2,4))
self._do(self.odd, (3,2), (3,5))
self._do(self.odd, (3, ), (2, ))
class TestPattern(object):
empty = Pattern()
simple = Pattern(r'[a-zA-Z]+') # TODO: This is a simple string regex.
simple = Pattern(re.compile(r'^[a-zA-Z]+$'))
def test_empty(self):
assert self.empty.validate('') == ''
def test_simple(self):
assert self.simple.validate('foo') == 'foo'
try:
self.simple.validate('Xyzzy-27!')
except Concern as e:
pass
else:
assert False, "Failed to raise a Concern."
class TestInstance(object):
empty = Instance()
uni = Instance(unicode)
def test_empty(self):
assert self.empty.validate('') == ''
def test_uni(self):
assert self.uni.validate('hello') == 'hello'
try:
self.uni.validate(27)
except Concern as e:
pass
else:
assert False, "Failed to raise a Concern."
class TestSubclass(object):
empty = Subclass()
valid = Subclass(Validator)
def test_empty(self):
assert self.empty.validate(object) is object
def test_valid(self):
assert self.valid.validate(Subclass) is Subclass
try:
self.valid.validate(object)
except Concern as e:
pass
else:
assert False, "Failed to raise a Concern."
class TestEqual(object):
empty = Equal()
equal = Equal(27)
nil = Equal(None)
def test_empty(self):
assert self.empty.validate('') == ''
assert self.empty.validate(None) is None
def test_equal(self):
assert self.equal.validate(27) == 27
assert self.equal.validate(27.0) == 27.0
try:
self.equal.validate('27')
except Concern as e:
pass
else:
assert False, "Failed to raise a Concern."
def test_nil(self):
assert self.nil.validate(None) is None
try:
self.equal.validate(False)
except Concern as e:
pass
else:
assert False, "Failed to raise a Concern."
class TestUnique(object):
validator = Unique()
def _do(self, good, bad):
assert self.validator.validate(good) == good
try:
self.validator.validate(bad)
except Concern as e:
pass
else:
assert False, "Failed to raise a Concern."
def test_text(self):
self._do('cafe', 'babe')
def test_list(self):
self._do([27, 42], [1, 3, 3, 7])
def test_dict(self):
self._do(dict(bob=27, dole=42), dict(prince=12, pepper=12))
class TestValidated(ValidationTest):
class Sample(Container):
foo = Validated(validator=Equal(27))
def test_pass(self):
inst = self.Sample(27)
assert inst.foo == 27
inst = self.Sample()
inst.foo = 27
def test_fail(self):
try:
self.Sample(42)
except Concern as e:
pass
else:
assert False, "Failed to raise a Concern."
inst = self.Sample()
try:
inst.foo = 42
except Concern as e:
pass
else:
assert False, "Failed to raise a Concern."
| [
"alice@gothcandy.com"
] | alice@gothcandy.com |
69056d35e2d53095946c30eb093026ce866e92e3 | 72f37dabe9cddde460c5f5afad802d7d3d972af7 | /data_storage/transform.py | 0ac0ff439cc65b183b2945f6b93d14ac2e70b27c | [
"Apache-2.0"
] | permissive | dbca-wa/data-storage | fdf6de6735f9e18688ad8a3d1755295b3c11fa2d | ff8c93978d78042117a9c04217a31c3d7ecb2a3f | refs/heads/master | 2023-08-28T20:14:47.823353 | 2021-11-05T01:22:33 | 2021-11-05T01:22:33 | 263,812,682 | 0 | 2 | Apache-2.0 | 2021-03-15T03:17:36 | 2020-05-14T04:13:51 | Python | UTF-8 | Python | false | false | 2,554 | py | import tempfile
import os
import json
from .utils import remove_folder,JSONEncoder,JSONDecoder
from .resource import ResourceConstant
def change_metaindex(repository_metadata,f_metaname_code):
"""
Change the index calculating logic of the indexed resource repository
repository_metadata: the current resource repository's metadata
f_metaname_code: the new source code to calculate a resource's metaname
"""
work_dir = tempfile.mkdtemp()
try:
#save all existing resource metadatas to a json file
with open(os.path.join(work_dir,"resource_metadatas.json"),'w') as f:
for res_metadata in repository_metadata.resource_metadatas(throw_exception=False,resource_status=ResourceConstant.ALL_RESOURCE,resource_file=None):
f.write(json.dumps(res_metadata,cls=JSONEncoder))
f.write(os.linesep)
#meta index file
meta_dir = os.path.join(work_dir,"metadata")
os.mkdir(meta_dir)
repository_metadata.download(os.path.join(meta_dir,"{}.json".format(repository_metadata._metaname)))
#download all meta data file
for metaname,filename in repository_metadata.json:
repository_metadata.create_metadata_client(metaname).download(os.path.join(meta_dir,os.path.split(filename)[1]))
#remove meta file
for metaname in [o[0] for o in repository_metadata.json]:
repository_metadata.create_metadata_client(metaname).delete()
#remove meta index file
repository_metadata.delete()
#create a new repository metadata
keywords = dict((key,getattr(repository_metadata,attr)) for key,attr in repository_metadata.meta_metadata_kwargs)
keywords["f_metaname_code"] = f_metaname_code
new_repository_metadata = repository_metadata.__class__(repository_metadata._storage,**keywords)
with open(os.path.join(work_dir,"resource_metadatas.json"),'r') as f:
while True:
data = f.readline()
if not data:
break
data = data.strip()
if not data:
continue
res_metadata = json.loads(data.strip(),cls=JSONDecoder)
new_repository_metadata.update_resource(res_metadata)
remove_folder(work_dir)
return new_repository_metadata
except Exception as ex:
print("Failed to change the metadata index, check the folder({}) to get the previous meta data".format(work_dir))
| [
"rocky.chen@dpaw.wa.gov.au"
] | rocky.chen@dpaw.wa.gov.au |
e26e05796f0d6099c19bcd248bd8e34c70b566f3 | c5fedd852a93dd20bd1ab4db41997e3eeae64e63 | /sites/dev/settings.py | 3bb980b38c96fc6e417092022163f6ba1fbc5356 | [
"MIT"
] | permissive | rishikant42/django-business-logic | deab4e300f243da6428e19cc7c96d3af16375adc | 6f3d0774d7d30b64dc73ed32736ec045afb5de48 | refs/heads/master | 2020-03-31T07:28:24.664684 | 2018-09-17T13:55:28 | 2018-09-17T13:55:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 843 | py | from ..settings import *
DEBUG = True
ALLOWED_HOSTS = ['*']
INSTALLED_APPS += [
# 'django_extensions',
'bootstrap3',
'sites.dev.books',
]
ROOT_URLCONF = 'sites.dev.urls'
WSGI_APPLICATION = 'sites.dev.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(os.path.dirname(__file__), 'db.sqlite3'),
}
}
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_FINDERS = [
'sites.dev.utils.staticfiles.finders.AppDirectoriesIndexFinder',
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
if 'test' in sys.argv[1:] or 'jenkins' in sys.argv[1:]:
from ..test.settings import *
| [
"dgk@dgk.su"
] | dgk@dgk.su |
2e1c064f2f9ec480216bc2fec17ee9324f1500a7 | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/testData/override/methodWithOverloadsInTheSameFile.py | 70f8f8ba8877cc6015e8c1b173c3c5aa3f88a862 | [
"Apache-2.0"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 211 | py | from typing import overload
class Foo:
@overload
def fun(self, s:str) -> str: pass
@overload
def fun(self, i:int) -> int: pass
def fun(self, x):
pass
class B(Foo):
<caret>pass | [
"Semyon.Proshev@jetbrains.com"
] | Semyon.Proshev@jetbrains.com |
e46ebbee527c297550cc6be199c7e3e85394ba22 | 061684e59ba5c816419f763a25629af987f60d52 | /CashAlgo/weighted_rsi_intraday_strategy.py | 61ca7e5b7316c417ea7bfebdcb4aa5cb41feda39 | [] | no_license | wangyouan/PythonTest | 8d798fc5cde3ecaeb64301c3290fe51ea8577523 | 62177829b81e918cadb4a24527c4cdcaff734d7d | refs/heads/master | 2021-06-17T11:18:11.973935 | 2017-03-26T07:07:18 | 2017-03-26T07:07:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,498 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# File name: weighted_rsi_intraday_strategy
# Author: warn
# Date: 27/12/2015 10:20
import numpy
import talib
import cashAlgoAPI
class Strategy:
    """Volume-weighted RSI intraday strategy for the cashAlgo framework.

    Python 2 code (print statements; range() results are concatenated as
    lists).  The framework drives the strategy through init(),
    onMarketDataUpdate()/onOHLCFeed() and the other on* callbacks, and
    orders are submitted through self.mgr.
    NOTE(review): self.config and self.mgr are never assigned in this class;
    presumably they are injected by the framework before init() runs -- confirm.
    """
    # Signal latches: set when an oversold/overbought condition is first seen,
    # cleared when the RSI crosses back and the trade is taken.
    long_flag = False
    short_flag = False
    def __init__(self):
        # Rolling [price, volume] samples for the current trading day.
        self.close_data = []
        # Monotonic counter used as the order id.
        self.cnt = 0
        # Date portion of the last tick's timestamp (detects day rollover).
        self.last_data = None
        # Window sizes; the real values come from the config in init().
        self.data_number = None
        self.rsi_period = None
        # Current position size in shares.
        self.hold_volume = 0
        self.total_capital = 0
        self.current_capital = 0
    def init(self):
        """Framework entry point: reset state and read the config parameters."""
        self.cnt = 0
        self.close_data = []
        self.hold_volume = 0
        self.data_number = int(self.config.get("Strategy", "MaxDataNumber"))
        self.rsi_period = int(self.config.get("Strategy", "RsiPeriod"))
        self.current_capital = self.total_capital = float(self.config.get("Risk", "InitialCapital"))
    def onMarketDataUpdate(self, market, code, md):
        """Process one tick: maintain the price window and trade on weighted
        RSI crossings (buy on recovery from oversold, sell on a fall from
        overbought, flatten everything at the 16:00 close).

        Assumes md.timestamp looks like 'YYYYMMDD_HHMMSS...' (date and time
        separated by '_') -- confirm against the feed format.
        """
        # The following time is not allowed to trade. Only trade from 9:30 am to 12:00 am, and from 13:00 to 16:00
        time_info = md.timestamp.split('_')
        if not (int(time_info[1][:2]) in (range(10, 12) + range(13, 16)) or
                (time_info[1][:2] == '09' and int(time_info[1][2:]) >= 3000) or
                time_info[1][:4] == '1600'):
            return
        # Open price clear all the data
        if self.last_data != time_info[0]:
            self.last_data = time_info[0]
            self.close_data = []
        # Keep the window bounded to data_number samples.
        if len(self.close_data) >= self.data_number:
            self.close_data.pop(0)
        self.close_data.append([md.lastPrice, md.lastVolume])
        if len(self.close_data) >= self.rsi_period:
            rsi_result = self.get_weighted_rsi()
            if rsi_result < 30 and not self.long_flag:
                # Oversold: arm the long signal; trade on the way back up.
                self.long_flag = True
                self.short_flag = False
            elif self.long_flag and 70 > rsi_result >= 30:
                # RSI recovered into the neutral band: buy ~1/10 of capital.
                self.long_flag = False
                volume = self.total_capital / 10 / md.lastPrice
                if md.lastPrice * volume > self.current_capital >= md.lastPrice:
                    volume = self.current_capital / md.lastPrice
                if volume * md.lastPrice <= self.current_capital:
                    self.long_security(md.timestamp, code, md.askPrice1, volume)
            elif self.long_flag and rsi_result >= 70:
                # Jumped straight to overbought: abandon the long signal.
                self.long_flag = False
            elif self.hold_volume:
                if rsi_result > 70 and not self.short_flag:
                    # Overbought while holding: arm the sell signal.
                    self.short_flag = True
                elif self.short_flag and 70 >= rsi_result > 30:
                    # RSI fell back into the neutral band: liquidate.
                    self.short_flag = False
                    self.short_security(md.timestamp, code, md.lastPrice)
        # Flatten any open position at the 16:00 close.
        if self.hold_volume and md.timestamp.split('_')[1][:4] == '1600':
            print "Close market at %s, and sell all the belongings" % md.timestamp
            self.short_security(md.timestamp, code, md.lastPrice)
            self.short_flag = False
            self.long_flag = False
    def long_security(self, timestamp, code, price, volume):
        """Submit a buy order and debit the spent capital."""
        order = cashAlgoAPI.Order(timestamp, 'SEHK', code, str(self.cnt), price, int(volume),
                                  "open", 1, "insert", "market_order", "today")
        self.mgr.insertOrder(order)
        self.cnt += 1
        self.hold_volume += int(volume)
        self.current_capital -= int(volume) * price
    def short_security(self, timestamp, code, price):
        """Submit a sell order for the whole position and realize the cash."""
        order = cashAlgoAPI.Order(timestamp, 'SEHK', code, str(self.cnt), price, self.hold_volume,
                                  "open", 1, "insert", "market_order", "today")
        self.mgr.insertOrder(order)
        self.cnt += 1
        self.current_capital += self.hold_volume * price
        self.total_capital = self.current_capital
        self.hold_volume = 0
    def onOHLCFeed(self, of):
        """Adapt an OHLC bar into a MarketData tick and reuse the tick path."""
        # print "feed price of %s is %s" % (of.productCode, of.close)
        md = cashAlgoAPI.MarketData([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        md.timestamp = of.timestamp
        md.market = of.market
        md.productCode = str(of.productCode)
        md.lastPrice = of.close
        md.askPrice1 = of.close
        md.bidPrice1 = of.close
        md.lastVolume = of.volume
        self.onMarketDataUpdate(of.market, of.productCode, md)
    # Process Order
    def onOrderFeed(self, of):
        pass
    # Process Trade
    def onTradeFeed(self, tf):
        # print "Trade feed: %s price: %s, timestamp: %s volume: %s" % (tf.buySell, tf.price, tf.timestamp, tf.volume)
        pass
    # Process Position
    def onPortfolioFeed(self, portfolioFeed):
        pass
    # Process PnL
    def onPnlperffeed(self, pf):
        # print "dailyPnL: %s" % pf.dailyPnL
        pass
    def get_weighted_rsi(self):
        """Volume-weighted RSI over the last rsi_period samples (non-standard:
        ticks are weighted by traded volume rather than by price change).

        BUG(review): if the window has no up-ticks, up_price is empty and the
        up_rs line divides by zero; only the empty down_price case is guarded
        (down_rs = 0.01).
        """
        up_price = []
        down_price = []
        last_price = None
        close_data = self.close_data[-self.rsi_period:]
        for i in close_data:
            if last_price:
                if last_price > i[0]:
                    down_price.append(i)
                elif last_price < i[0]:
                    up_price.append(i)
            last_price = i[0]
        # Volume-weighted average price of up-ticks vs down-ticks.
        up_rs = sum([i[0] * i[1] for i in up_price]) / sum([i[1] for i in up_price])
        if down_price:
            down_rs = sum([i[0] * i[1] for i in down_price]) / sum([i[1] for i in down_price])
        else:
            down_rs = 0.01
        rsi = 100 - 100 / (1 + up_rs / down_rs)
return rsi
| [
"wangyouan0629@hotmail.com"
] | wangyouan0629@hotmail.com |
f4a3e6d8a542ab88facff6b22770e3ff898d3050 | f6f29c2fa719c53eee73de2acd86db9e1278182e | /design_patterns/observor/event_system.py | 6661d910ed6e4bf7106b6ddb905dac47ca3f201a | [] | no_license | byt3-m3/python_code_practice | ca08320e1778449d30204b65f15903d5830b7975 | 40e215c4d4ab62cf7d55d2456d94550335825906 | refs/heads/master | 2023-07-24T08:29:06.624850 | 2021-09-04T02:39:32 | 2021-09-04T02:39:32 | 256,984,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,369 | py | from dataclasses import dataclass
from enum import Enum
from queue import Queue
from typing import List, Tuple, Any
import asyncio
class RegisteredMethods(Enum):
    """Placeholder Enum for registerable handler methods; no members are defined yet."""
message_que = Queue()  # Module-level queue; not referenced anywhere else in this module.
@dataclass
class Event:
    """A bus event: `event_name` selects the subscribed handlers, `data` is the payload."""
    event_name: str
    data: Any
class EventManager:
    """Minimal synchronous publish/subscribe event bus.

    Handlers are registered per event name (subscribe / register_handlers)
    and are invoked with the event's payload when publish_event is called.
    """
    def __init__(self, *args, **kwargs):
        # Accept the bus name as a `name=` keyword or as the first positional
        # argument.  BUG FIX: the original read only kwargs, so the positional
        # call EventManager("TestBus") silently left name=None.
        self.name = kwargs.get("name")
        if self.name is None and args:
            self.name = args[0]
        # BUG FIX: the registry is now per-instance; the original used a
        # mutable class attribute shared by every EventManager instance.
        self.subscribers = {}
    def subscribe(self, event_name, function):
        """Register *function* as a handler for *event_name*."""
        # BUG FIX: the original stored a handler only when the event name was
        # new, so additional handlers for an existing event were dropped.
        if event_name not in self.subscribers:
            self.subscribers[event_name] = set()
        self.subscribers[event_name].add(function)
    def publish_event(self, event: "Event"):
        """Call every handler subscribed to event.event_name with event.data.

        Unknown event names are ignored.  Handlers run synchronously: the
        original passed the (already executed) handler result to
        asyncio.create_task, which raises RuntimeError when no event loop is
        running.  (The annotation is a string so the forward reference Event
        is not evaluated at definition time.)
        """
        if event.event_name not in self.subscribers:
            return
        for function in self.subscribers[event.event_name]:
            function(event.data)
    # Annotation fixed: the second tuple element is a handler callable, not a str.
    def register_handlers(self, event_handler_tuples: List[Tuple[str, Any]]):
        """Bulk-subscribe a list of (event_name, handler) pairs."""
        for event_handler_tuple in event_handler_tuples:
            print(f"Registering {event_handler_tuple}")
            self.subscribe(event_handler_tuple[0], event_handler_tuple[1])
    @property
    def registered_handlers(self):
        """Registry snapshot: {'registered_handlers': [(name, handler_set), ...]}."""
        return {'registered_handlers': list(self.subscribers.items())}
class Handlers:
    """Demo handler callbacks; each simply prints the payload it receives."""
    @staticmethod
    def handle_send_message_event(data):
        print(f"Sending Message: {data}")
    @staticmethod
    def handle_send_email_event(data):
        print(f"Sending Email1: {data}")
    @staticmethod
    def handle_send_email_event2(data):
        # Currently unreferenced: its registration is commented out below.
        print(f"Sending Email2: {data}")
def get_event_manager(name: str):
    """Build an EventManager pre-wired with the demo send handlers.

    Args:
        name: Human-readable name for the event bus.

    Returns:
        An EventManager with handlers registered for 'send_message' and
        'send_email'.
    """
    # BUG FIX: EventManager.__init__ reads the name from **kwargs, so the
    # original positional call EventManager(name) silently dropped the name.
    event_manager = EventManager(name=name)
    event_manager.register_handlers(event_handler_tuples=[
        ('send_message', Handlers.handle_send_message_event),
        ('send_email', Handlers.handle_send_email_event),
        # ('send_email', Handlers.handle_send_email_event2)
    ])
    return event_manager
def main():
    """Demo driver: publish one email and one message event on a test bus."""
    payload = {"test": "data"}
    email_event = Event(data=payload, event_name='send_email')
    message_event = Event(data=payload, event_name='send_message')
    bus = get_event_manager(name='TestBus')
    for evt in (email_event, message_event):
        bus.publish_event(event=evt)
    print(bus.registered_handlers)
if __name__ == '__main__':
main() | [
"cbaxtertech@gmail.com"
] | cbaxtertech@gmail.com |
7ae1ccf015cbb9f6702d6dbac4c89554e20e7909 | ba9e1fc7797ebc55a61a40ee66c51b467f353ff1 | /web_scraping_with_python_demos/docx-parser.py | d56bc005da5bfc4c2d478182efe0cc74e860b49e | [] | no_license | sanpianye/the-little-python | 77c938164d43cbb120063a6d17d0705cc9e92e93 | c04898bf0812afb53b71567699ee523d1bc56a29 | refs/heads/master | 2021-06-14T01:55:31.452777 | 2017-03-09T13:31:59 | 2017-03-09T13:31:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,122 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''Demo: download a .docx file, extract its XML and print the document text.'''
__author__ = 'Engine'
from zipfile import ZipFile
from urllib.request import urlopen
from io import BytesIO
from bs4 import BeautifulSoup
# Steps to read the XML out of a .docx file:
# 1. Download/read the .docx file.
wordFile = urlopen("http://pythonscraping.com/pages/AWordDocument.docx").read()
# 2. Wrap the raw bytes in a binary file-like object.
wordFile = BytesIO(wordFile)
# 3. Unzip it (every .docx is a zip archive, compressed to save space).
document = ZipFile(wordFile)
# 4. Read the unpacked document part -- that is the XML content.
xml_content = document.read("word/document.xml")
# Build a BeautifulSoup object over the XML.
wordObj = BeautifulSoup(xml_content.decode("utf-8"))
# From here it is just a matter of walking the XML tags.
textStrings = wordObj.findAll("w:t") # all body text lives inside <w:t> tags
for textElem in textStrings:
    closeTag = ""
    try:
        # Paragraphs styled "Title" get wrapped in <h1> tags in the output.
        style = textElem.parent.previousSibling.find("w:pstyle")
        if style is not None and style["w:val"] == "Title":
            print("<h1>")
            closeTag = "</h1>"
    except AttributeError:
        pass
    print(textElem.text)
print(closeTag)
| [
"enginechen07@gmail.com"
] | enginechen07@gmail.com |
a2a3894ce8cbaf1c0878ecb12053cf13cd82f539 | edfcd96f0010ea068a4c046bdcf7067ff92d3f9b | /Cryptography/Find_Files_5.py | c6fe25661616e3f39926bcb2020522f17adf1491 | [] | no_license | afsanehshu/python-project | a99ff558f375c1f5e17ea6ffc13af9216ec4733f | 48905cfd24df6d1f48460d421ed774f19403cf53 | refs/heads/main | 2023-08-03T01:53:32.812949 | 2021-09-22T19:36:25 | 2021-09-22T19:36:25 | 409,303,454 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,080 | py | from subprocess import check_output
def find_drive():
    """Return the drive letters ("X:") mentioned in `net share` output.

    Runs the Windows `net share` command and keeps, in the original probe
    order, every candidate letter whose "X:" token appears anywhere in the
    listing.
    """
    candidates = ["A:", "B:", "C:", "D:", "E:", "F:", "G:", "H:", "Z:", "N:",
                  "K:", "L:", "X:", "P:", "U:", "J:", "S:", "R:", "W:", "Q:",
                  "T:", "Y:", "I:", "O:", "V:", "M:"]
    share_listing = check_output("net share", shell=True)
    return [letter for letter in candidates if letter in share_listing]
def find_files(drives):
    # Search the current drive and every shared drive for files with the
    # target extensions, appending each Windows `dir` listing to the global
    # output file `f` (opened at module level; Extension_Files is also a
    # module-level global).  Python 2 code (print statements).
    # NOTE(review): the bare `except` clauses swallow *all* failures, which
    # hides real errors beyond the inaccessible-path case they seem to target.
    for p in Extension_Files:
        try:
            # Recursive, bare-path listing of matching files from the root.
            cmd = check_output("cd / && dir /S /B *."+p,shell=True)
            f.writelines(cmd)
            print p
        except:
            pass
    for d in drives:
        for p in Extension_Files:
            try:
                # Same listing, run on each shared drive.
                cmd = check_output(d+"&& dir /S /B *."+p,shell=True)
                f.writelines(cmd)
                print p+"-------"+d
            except:
                pass
    f.close()
# Script driver state: the extensions to collect, the drives found via
# `net share`, and the output file handle used as a global by find_files().
Extension_Files = ["jpg","txt","pdf"]
drives = find_drive()
f = open("File_Path.txt","w")
find_files(drives)
| [
"afsanehshu@gmail.com"
] | afsanehshu@gmail.com |
5bae2dd0b98a698b47ecc1b62c9b3f817edb250c | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02706/s376556276.py | 52d2b727b08cf6404ff408485c8063321452bc56 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 242 | py | def main():
    # First line of input: two integers n and m.
    n, m = map(int, input().split())
    # Second line: the list of integers (m values expected; not validated).
    a_lst = list(map(int, input().split()))
    total = sum(a_lst)
    if total > n:
        # The values sum to more than n: report -1.
        ans = -1
    else:
        # Otherwise report the remainder n - total.
        ans = n - total
    print(ans)
if __name__ == "__main__":
main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
1c67c11e8d837208916769996d99f60998ebd5a2 | 431963e431a277ec5682973f275f7d43f8b2b4de | /hackrankoj/strings/design_door_mat.py | 85161e85275b6c0c9e950c9dafe9bfb223d3ae88 | [] | no_license | gitter-badger/python_me | 168e1fd126f42c92de7e82833251abce63fe2d0a | 8f31da00530e7132e58d0c1c06b5805a6b0f96e6 | refs/heads/master | 2020-12-03T01:56:38.909715 | 2017-03-15T01:02:04 | 2017-03-15T01:02:04 | 95,884,399 | 0 | 0 | null | 2017-06-30T11:59:27 | 2017-06-30T11:59:27 | null | UTF-8 | Python | false | false | 602 | py |
# HackerRank "Designer Door Mat" (Python 2: raw_input/xrange/print statement).
# Reads N and M and prints the ".|."-patterned mat with WELCOME centered in
# the middle row.  (M itself is unused below; the mat width used is 3*N.)
N, M = map(int,raw_input().split())
# Note on the input line above: in Python 2, map() returns a list.  Assigned
# to a single variable it stays a whole list; assigned to several variables it
# unpacks, in which case the number of targets must match the list length.
# Top half: 1, 3, 5, ... repetitions of ".|.", centered with '-' padding.
for i in xrange(1,N,2):
    print ('.|.'*i).center(3*N,'-')
print 'WELCOME'.center(3*N,'-')
# Bottom half mirrors the top.  Note: xrange stops *before* -1 (last value is
# 0 or 1) -- unlike a slice index, where -1 would mean the last element.
for i in xrange(N-2,-1,-2):
print ('.|.'*i).center(3*N,'-')
| [
"chenye626@gmail.com"
] | chenye626@gmail.com |
0edbee8ebd09aff5b83eced5e5ffea2cae7567eb | 1860aa3e5c0ba832d6dd12bb9af43a9f7092378d | /modules/xlwt3-0.1.2/examples/formulas.py | 460a7b22c17dd8037a1feebf63797f1a29d9c3c6 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | agz1990/GitPython | d90de16451fab9222851af790b67bcccdf35ab75 | 951be21fbf8477bad7d62423b72c3bc87154357b | refs/heads/master | 2020-08-06T18:12:26.459541 | 2015-07-05T14:58:57 | 2015-07-05T14:58:57 | 12,617,111 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,359 | py | #!/usr/bin/env python
# -*- coding: windows-1251 -*-
# Copyright (C) 2005 Kiseliov Roman
from xlwt3 import *
# Build a workbook with a single sheet and exercise various Formula strings.
w = Workbook()
ws = w.add_sheet('F')
# Column A: literal arithmetic formulas.
ws.write(0, 0, Formula("-(1+1)"))
ws.write(1, 0, Formula("-(1+1)/(-2-2)"))
ws.write(2, 0, Formula("-(134.8780789+1)"))
ws.write(3, 0, Formula("-(134.8780789e-10+1)"))
ws.write(4, 0, Formula("-1/(1+1)+9344"))
# Column B: the same formulas again, to be referenced below.
ws.write(0, 1, Formula("-(1+1)"))
ws.write(1, 1, Formula("-(1+1)/(-2-2)"))
ws.write(2, 1, Formula("-(134.8780789+1)"))
ws.write(3, 1, Formula("-(134.8780789e-10+1)"))
ws.write(4, 1, Formula("-1/(1+1)+9344"))
# Column C: cell references, functions and percent/constant arithmetic.
ws.write(0, 2, Formula("A1*B1"))
ws.write(1, 2, Formula("A2*B2"))
ws.write(2, 2, Formula("A3*B3"))
ws.write(3, 2, Formula("A4*B4*sin(pi()/4)"))
ws.write(4, 2, Formula("A5%*B5*pi()/1000"))
##############
## NOTE: parameters are separated by semicolon!!!
##############
# Nested references, powers, SUM with empty arguments and absolute ranges.
ws.write(5, 2, Formula("C1+C2+C3+C4+C5/(C1+C2+C3+C4/(C1+C2+C3+C4/(C1+C2+C3+C4)+C5)+C5)-20.3e-2"))
ws.write(5, 3, Formula("C1^2"))
ws.write(6, 2, Formula("SUM(C1;C2;;;;;C3;;;C4)"))
ws.write(6, 3, Formula("SUM($A$1:$C$5)"))
# String literals, concatenation, NOW(), booleans and IF().
ws.write(7, 0, Formula('"lkjljllkllkl"'))
ws.write(7, 1, Formula('"yuyiyiyiyi"'))
ws.write(7, 2, Formula('A8 & B8 & A8'))
ws.write(8, 2, Formula('now()'))
ws.write(10, 2, Formula('TRUE'))
ws.write(11, 2, Formula('FALSE'))
ws.write(12, 3, Formula('IF(A1>A2;3;"hkjhjkhk")'))
w.save('formulas.xls')
| [
"522360568@qq.com"
] | 522360568@qq.com |
cab031ae11fb3718e0a6baeb7ec9c3ccfef1d7e9 | b92e187f60b1bc8bd74eaa0ffc6e1ac50911a08e | /python/randperson.py | 34f0715c975e12cabe2b53093dfeccf3c4ab59a7 | [] | no_license | code-xD/codefundo-hack | ad5b149726188bd15be7476f14adf90f08ff33d7 | f4883015d2d5f4b1b6a493ffa58249c46fc544a1 | refs/heads/master | 2022-12-10T10:41:30.388439 | 2019-08-20T12:06:19 | 2019-08-20T12:06:19 | 195,676,080 | 0 | 1 | null | 2022-05-25T03:12:11 | 2019-07-07T16:56:28 | CSS | UTF-8 | Python | false | false | 1,305 | py | from random import randint
import json
def genper():
    """Generate 50 pseudo-random voter records as JSON strings.

    Reads names, address lines and PIN codes line-by-line from the files in
    randomdata/, fills in random region codes, age and a 12-digit Aadhaar-like
    number, and returns a list of json.dumps()-serialized dicts (the first 25
    records get gender=1, the rest gender=2).
    """
    records = []
    with open("randomdata/names.dat", 'r') as names, \
            open("randomdata/address.dat", 'r') as address, \
            open("randomdata/pin.dat", 'r') as pins:
        for index in range(50):
            person = {}
            person['voter_name'] = names.readline()[:-1]
            person['aLine1'] = address.readline()[:-1]
            person['aLine2'] = address.readline()[:-1]
            person['pin'] = pins.readline()[:-1]
            person['s_code'] = randint(1, 5)
            person['c_code'] = randint(1, person['s_code'] * 2)
            person['d_code'] = randint(1, person['c_code'] * 2)
            person['age'] = randint(18, 100)
            person['gender'] = 1 if index < 25 else 2
            # Twelve random digits (1-9) joined into a single integer.
            person['aadhar_no'] = int(''.join(str(randint(1, 9)) for _ in range(12)))
            records.append(json.dumps(person))
    return records
print(genper()[0])
| [
"shivansh586@gmail.com"
] | shivansh586@gmail.com |
2faad1676557dc4b03d872486ecf0b9987f89875 | 9a5b0bcc1302373fc37315e15def172e2d5a8e3a | /demo/libdemo/list_mobiles.py | ce3c4a8aad9f7232493144ebd7556683338cf873 | [] | no_license | srikanthpragada/PYTHON_06_APR_2020 | b74a7d2e80d8a8ffe203ba5bd562484e80b3502b | f999c5ca001137d21b9cd6ca55f3e35dd0e6c3ca | refs/heads/master | 2021-05-25T15:02:38.057653 | 2020-05-07T13:29:01 | 2020-05-07T13:29:01 | 253,799,331 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | f = open("mobiles.txt", "rt")
# Gather all comma-separated entries from the already-opened file handle `f`
# (mobiles.txt) into one flat list.
mobiles = []
for line in f.readlines():
    # Each line may list several entries separated by commas.
    parts = line.strip().split(',')
    mobiles.extend(parts)
f.close()
# Print the collected entries in sorted order.
for mobile in sorted(mobiles):
print(mobile)
| [
"srikanthpragada@gmail.com"
] | srikanthpragada@gmail.com |
2c5cac0cff0f1ded9317a146c18bfbd93283ec12 | b4166044870d1c026e86c95ac41e3e3613ee424f | /python_basic/abc083_b.py | 2601012108b74d5bd9f1837c7d3fee4758f9b704 | [] | no_license | nsakki55/AtCoder | 2cbb785415a7c0b9df9953ddc3706c90a5716a03 | 03c428e8eb8f24b8560d00e2388ba75509619690 | refs/heads/master | 2020-05-31T04:33:06.400697 | 2020-01-19T13:41:41 | 2020-01-19T13:41:41 | 190,099,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 133 | py | n,a,b=map(int,input().split())
# Accumulate every i in 1..n whose decimal digit sum lies in [a, b]
# (n, a, b were read on the previous line; the total is printed below).
ans=0
for i in range(1,n+1):
    # sum(...) here is the sum of the decimal digits of i.
    if a<=sum(map(int,list(str(i))))<=b:
        ans+=i
print(ans) | [
"n.sakki55@gmail.com"
] | n.sakki55@gmail.com |
b2081078ee4bfd2203caa42da05edebc5a56f1d9 | 34bfa980bf04365bde38f30506c4e375a5e40186 | /Question3.py | 60192f77eb5e5a9f96e621ac29b7a136b6039461 | [] | no_license | krishnabojha/Insight_workshopAssignment4 | a797f6fa650c099f3d2f3e86dce82bc670d158e6 | dcc2b9bae8ada46dc440b73406cea7b79796cff8 | refs/heads/master | 2022-11-10T15:12:22.742117 | 2020-07-05T12:55:46 | 2020-07-05T12:55:46 | 277,301,683 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 491 | py | ############## find Anagram word of paragraph
from collections import defaultdict
def find_anagram(words):
    """Group *words* by sorted-letter signature and print each anagram group.

    Words that are anagrams of one another share a signature and are printed
    on a single space-separated line, in order of first appearance; words
    with no anagram partner are printed alone.
    """
    buckets = {}
    for token in words:
        signature = "".join(sorted(token))
        buckets.setdefault(signature, []).append(token)
    print("The word and it's anagram : ")
    for bucket in buckets.values():
        print(" ".join(bucket))
if __name__ == "__main__":
paragraph =input("Enter your paragraph : ").split()
find_anagram(paragraph)
| [
"ojhakrishna010@gmail.com"
] | ojhakrishna010@gmail.com |
11def6de3c772d0ff5c0f3a581b1122036c21c10 | 2f63688febd21dc3ae6b19abfa79ad313c820154 | /0918_Maximum_Sum_Circular_Subarray/try_1.py | d4bcafd84f354962e6ea4f78b507e82d76e60c5c | [] | no_license | novayo/LeetCode | cadd03587ee4ed6e35f60294070165afc1539ac8 | 54d0b3c237e0ffed8782915d6b75b7c6a0fe0de7 | refs/heads/master | 2023-08-14T00:35:15.528520 | 2023-07-30T05:56:05 | 2023-07-30T05:56:05 | 200,248,146 | 8 | 1 | null | 2022-11-19T04:37:54 | 2019-08-02T14:24:19 | Python | UTF-8 | Python | false | false | 653 | py | class Solution:
def maxSubarraySumCircular(self, A: List[int]) -> int:
# Kadane's algorithm : https://www.youtube.com/watch?v=86CQq3pKSUw
def Kadane(A):
cur_max = max_sum = A[0]
cur_min = min_sum = A[0]
sum = A[0]
for i in range(1, len(A)):
cur_max = max(A[i], cur_max + A[i])
max_sum = max(max_sum, cur_max)
cur_min = min(A[i], cur_min + A[i])
min_sum = min(min_sum, cur_min)
sum += A[i]
return max(max_sum, sum-min_sum) if max_sum > 0 else max_sum
return Kadane(A)
| [
"f14051172@gs.ncku.edu.tw"
] | f14051172@gs.ncku.edu.tw |
799897d2411998aa3a754bb8328495b8a607867c | 74cafb5c10a700fb7aca1447edff45235563b304 | /Exercises_2/loops/l12.py | d2c7ddfe1173577eabb409c9819df25394b2d98b | [] | no_license | marcinpgit/Python_exercises | 68be4e0e731ba5efb48c4d5cf28189ed7df8d179 | 149fc3a811c743c2e32d04960f682f158fd34c1f | refs/heads/master | 2021-07-04T16:07:38.001001 | 2017-09-27T21:42:27 | 2017-09-27T21:42:27 | 104,941,119 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 293 | py | # Write a Python program that accepts a sequence of lines (blank line to terminate) as input and
# prints the lines as output (all characters in lower case)
# Read input lines until a blank line terminates the loop, storing each
# (upper-cased) line.
# NOTE(review): the exercise comment above says "all characters in lower
# case", but the code calls .upper() -- one of the two is wrong; confirm the
# intended behaviour.
lines = []
while True:
    l = input()
    if l:
        lines.append(l.upper())
    else:
        break;  # stray semicolon is legal but un-Pythonic
# Echo the collected lines (the print is on the following line).
for l in lines:
print(l) | [
"marcinp2012@gmail.com"
] | marcinp2012@gmail.com |
0a9067acff6d6d906b49a2d4d1295289d14610ad | d1ad901e1e926d9c92ce4dc7a7ba3c6ee91a65e2 | /tests/portstat/test_portstat.py | 3750cfd7d64c998e2098e254e336ab52960060f4 | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] | permissive | SubhajitPalKeysight/sonic-mgmt | ff59c2c5baf53cc2575aea2d541278fc9cf56977 | e4b308a82572996b531cc09cbc6ba98b9bd283ea | refs/heads/master | 2022-12-31T01:03:47.757864 | 2020-10-15T11:04:37 | 2020-10-15T11:04:37 | 286,815,154 | 1 | 1 | NOASSERTION | 2020-08-11T18:08:34 | 2020-08-11T18:08:33 | null | UTF-8 | Python | false | false | 7,977 | py |
import logging
import pytest
from tests.common.helpers.assertions import pytest_assert
from tests.common.utilities import wait
# BUG FIX: the original passed the string literal '__name__', which creates a
# logger literally named "__name__" instead of one named after this module.
logger = logging.getLogger(__name__)
pytestmark = [
    # Module-wide marker: these tests may run on any testbed topology.
    pytest.mark.topology('any')
]
def parse_column_positions(separation_line, separation_char='-'):
    '''Parse the position of each column in the command output.

    Args:
        separation_line (string): The output line separating actual data and column headers.
        separation_char (str, optional): The character used in the separation line. Defaults to '-'.

    Returns:
        list: One (start, end) tuple per column, where start is the index of the first
        character of the column's separator run and end is the index just past its last.
    '''
    # BUG FIX: the original initialized `prev = ' ',` -- the trailing comma
    # made it a one-element tuple, so the first character always compared
    # unequal to prev; a line starting with a non-separator character then hit
    # `right = pos` before `left` existed, raising UnboundLocalError.  The
    # close condition is also now `prev == separation_char`, so a run is only
    # closed when one was actually opened.
    prev = ' '
    positions = []
    # A trailing space is appended so a separator run at the end of the line
    # is still closed.
    for pos, char in enumerate(separation_line + ' '):
        if char == separation_char:
            if prev != separation_char:
                # Transition into a separator run: record the column start.
                left = pos
        else:
            if prev == separation_char:
                # Transition out of a separator run: record and emit the column.
                right = pos
                positions.append((left, right))
        prev = char
    return positions
def parse_portstat(content_lines):
    '''Parse the output of the portstat command.

    Args:
        content_lines (list): The output lines of the portstat command.

    Returns:
        dict: Keyed by interface name; each value maps the lower-cased column
        header to that interface's cell value.  Empty dict on bad output.
    '''
    # Locate the '----' separator row; the row directly above it holds the
    # column headers.
    header_line = ''
    separation_line = ''
    separation_line_number = 0
    for row_index, row in enumerate(content_lines):
        if '----' in row:
            header_line = content_lines[row_index - 1]
            separation_line = row
            separation_line_number = row_index
            break
    try:
        positions = parse_column_positions(separation_line)
    except Exception:
        logger.error('Possibly bad command output')
        return {}
    headers = [header_line[left:right].strip().lower() for left, right in positions]
    if not headers:
        return {}
    # Every row below the separator is a data row; slice it by the same
    # column positions.  The first cell is the interface name.
    results = {}
    for row in content_lines[separation_line_number + 1:]:
        cells = [row[left:right].strip() for left, right in positions]
        intf = cells[0]
        results[intf] = dict(zip(headers[1:], cells[1:]))
    return results
@pytest.fixture(scope='function', autouse=True)
def reset_portstat(duthost):
    """Autouse fixture: wipe all saved portstat tags before and after each test."""
    logger.info('Clear out all tags')
    duthost.command('portstat -D', become=True, module_ignore_errors=True)
    yield
    logger.info("Reset portstate ")
    duthost.command('portstat -D', become=True, module_ignore_errors=True)
@pytest.mark.parametrize('command', ['portstat -c', 'portstat --clear'])
def test_portstat_clear(duthost, command):
    """Verify `portstat -c/--clear` resets the per-interface RX_OK/TX_OK counters."""
    wait(30, 'Wait for DUT to receive/send some packets')
    before_portstat = parse_portstat(duthost.command('portstat')['stdout_lines'])
    pytest_assert(before_portstat, 'No parsed command output')
    duthost.command(command)
    wait(1, 'Wait for portstat counters to refresh')
    after_portstat = parse_portstat(duthost.command('portstat')['stdout_lines'])
    pytest_assert(after_portstat, 'No parsed command output')
    """
    Assert only when rx/tx count is no smaller than COUNT_THRES because DUT may send or receive
    some packets during test after port status are clear
    """
    COUNT_THRES = 10
    # Counters are formatted with thousands separators, e.g. "1,234".
    for intf in before_portstat:
        rx_ok_before = int(before_portstat[intf]['rx_ok'].replace(',',''))
        rx_ok_after = int(after_portstat[intf]['rx_ok'].replace(',',''))
        tx_ok_before = int(before_portstat[intf]['tx_ok'].replace(',',''))
        tx_ok_after = int(after_portstat[intf]['tx_ok'].replace(',',''))
        # NOTE(review): the int() around the boolean comparisons is redundant.
        if int(rx_ok_before >= COUNT_THRES):
            pytest_assert(rx_ok_before >= rx_ok_after,
                          'Value of RX_OK after clear should be lesser')
        if int(tx_ok_before >= COUNT_THRES):
            pytest_assert(tx_ok_before >= tx_ok_after,
                          'Value of TX_OK after clear should be lesser')
@pytest.mark.parametrize('command', ['portstat -D', 'portstat --delete-all'])
def test_portstat_delete_all(duthost, command):
    """Verify `portstat -D/--delete-all` removes every saved stats file.

    NOTE(review): the stat path format string never references {filename},
    so all three tags resolve to the same '(unknown)' path and the
    filename=... keyword passed to .format() is silently ignored.  The path
    probably should include the tag name -- confirm against upstream.
    """
    stats_files = ('test_1', 'test_2', 'test_test')
    logger.info('Create several test stats files')
    for stats_file in stats_files:
        duthost.command('portstat -c -t {}'.format(stats_file))
    logger.info('Verify that the file names are in the /tmp directory')
    uid = duthost.command('id -u')['stdout'].strip()
    for stats_file in stats_files:
        pytest_assert(duthost.stat(path='/tmp/portstat-{uid}/{uid}-(unknown)'\
            .format(uid=uid, filename=stats_file))['stat']['exists'])
    logger.info('Run the command to be tested "{}"'.format(command))
    duthost.command(command)
    logger.info('Verify that the file names are not in the /tmp directory')
    for stats_file in stats_files:
        pytest_assert(not duthost.stat(path='/tmp/portstat-{uid}/{uid}-(unknown)'\
            .format(uid=uid, filename=stats_file))['stat']['exists'])
@pytest.mark.parametrize('command',
                         ['portstat -d -t', 'portstat -d --tag', 'portstat --delete -t', 'portstat --delete --tag'])
def test_portstat_delete_tag(duthost, command):
    """Verify deleting one named tag keeps the other saved tags intact.

    NOTE(review): as in test_portstat_delete_all, the stat path format string
    never uses {filename}; every tag resolves to the same '(unknown)' path
    and the filename=... keyword is silently ignored by .format().  Confirm
    the intended path upstream.
    """
    stats_files = ('test_1', 'test_2', 'test_delete_me')
    file_to_delete = stats_files[2]
    files_not_deleted = stats_files[:2]
    logger.info('Create several test stats files')
    for stats_file in stats_files:
        duthost.command('portstat -c -t {}'.format(stats_file))
    logger.info('Verify that the file names are in the /tmp directory')
    uid = duthost.command('id -u')['stdout'].strip()
    for stats_file in stats_files:
        pytest_assert(duthost.stat(path='/tmp/portstat-{uid}/{uid}-(unknown)'\
            .format(uid=uid, filename=stats_file))['stat']['exists'])
    full_delete_command = command + ' ' + file_to_delete
    logger.info('Run the command to be tested "{}"'.format(full_delete_command))
    duthost.command(full_delete_command)
    logger.info('Verify that the deleted file name is not in the directory')
    pytest_assert(not duthost.stat(path='/tmp/portstat-{uid}/{uid}-(unknown)'\
        .format(uid=uid, filename=file_to_delete))['stat']['exists'])
    logger.info('Verify that the remaining file names are in the directory')
    for stats_file in files_not_deleted:
        pytest_assert(duthost.stat(path='/tmp/portstat-{uid}/{uid}-(unknown)'\
            .format(uid=uid, filename=stats_file))['stat']['exists'])
@pytest.mark.parametrize('command', ['portstat -a', 'portstat --all'])
def test_portstat_display_all(duthost, command):
    """`portstat -a/--all` must show strictly more columns than plain portstat."""
    base_portstat = parse_portstat(duthost.command('portstat')['stdout_lines'])
    all_portstats = parse_portstat(duthost.command(command)['stdout_lines'])
    pytest_assert(base_portstat and all_portstats, 'No parsed command output')
    logger.info('Verify the all number of columns is greater than the base number of columns')
    for intf in all_portstats.keys():
        pytest_assert(len(all_portstats[intf].keys()) > len(base_portstat[intf].keys()))
@pytest.mark.parametrize('command', ['portstat -p 1', 'portstat --period 1'])
def test_portstat_period(duthost, command):
    """`portstat -p/--period 1` must announce the 1-second rate window on its first line."""
    output = duthost.command(command)
    pytest_assert('The rates are calculated within 1 seconds period' in output['stdout_lines'][0])
@pytest.mark.parametrize('command', ['portstat -h', 'portstat --help', 'portstat', 'portstat -v',
                                     'portstat --version', 'portstat -j', 'portstat --json',
                                     'portstat -r', 'portstat --raw'])
def test_portstat_no_exceptions(duthost, command):
    """Smoke test: each informational portstat invocation must not traceback."""
    logger.info('Verify that the commands do not cause tracebacks')
duthost.command(command)
| [
"noreply@github.com"
] | SubhajitPalKeysight.noreply@github.com |
b2c183b7fdf094d29b43ebc5e76045156e4d658c | 8930d3c7a4c8441c4129b49fc98c5c88c395fa67 | /deepy/core/disconnected_grad.py | 49a2afa2ff44e3371c9e54710a7d8925594c2d59 | [
"MIT"
] | permissive | degerli/deepy | 2b5d1f456cdd025b609aad4a47eaa55494960bdc | 090fbad22a08a809b12951cd0d4984f5bd432698 | refs/heads/master | 2020-04-15T00:27:30.637696 | 2017-01-10T04:27:39 | 2017-01-10T04:27:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from theano.compile import ViewOp
from theano.gradient import DisconnectedType
class DisconnectedGrad(ViewOp):
    """Identity op whose gradient is disconnected from the Theano graph.

    The forward pass behaves like ViewOp (returns its input unchanged); the
    backward pass reports DisconnectedType for every output gradient, so no
    gradient flows through this op.
    """
    def grad(self, args, g_outs):
        # One disconnected gradient per output gradient.
        return [ DisconnectedType()() for g_out in g_outs]
    def connection_pattern(self, node):
        # The output is not differentiably connected to the input.
        return [[False]]
disconnected_grad = DisconnectedGrad() | [
"raphael@uaca.com"
] | raphael@uaca.com |
297e61e05829aff2550077a269e98ab8d80e37cd | a60e81b51935fb53c0900fecdadba55d86110afe | /MachineLearning/Softmax Regression/main.py | 0835b6604536cd468caf7d5d2807bb8b80fecde4 | [] | no_license | FrankieZhen/Lookoop | fab6855f5660467f70dc5024d9aa38213ecf48a7 | 212f8b83d6ac22db1a777f980075d9e12ce521d2 | refs/heads/master | 2020-07-27T08:12:45.887814 | 2019-09-16T11:48:20 | 2019-09-16T11:48:20 | 209,021,915 | 1 | 0 | null | 2019-09-17T10:10:46 | 2019-09-17T10:10:46 | null | UTF-8 | Python | false | false | 4,138 | py | # 2018-9-30
# Softmax Regression
# python 机器学习算法
import numpy as np
import random as rd
import matplotlib.pyplot as plt
def loadData(file_name, skip=True):
"""
载入数据
"""
feature = []
label = []
file = open(file_name)
for line in file.readlines():
feature_tmp = []
label_tmp = []
data = line.strip().split("\t") # split(" ")
feature_tmp.append(1) # 偏置项
if skip:
for i in data[:-1]:
feature_tmp.append(float(i))
else:
for i in data:
feature_tmp.append(float(i))
label_tmp.append(int(data[-1]))
feature.append(feature_tmp)
label.append(label_tmp)
file.close()
return np.mat(feature), np.mat(label)
def train(feature, label, k, max_iteration, alpha):
"""
梯度下降法
"""
m, n = np.shape(feature)
weights = np.mat(np.ones((n, k))) # n x k
i = 0
while i <= max_iteration:
y = np.exp(feature * weights) # m x k
if i % 500 == 0:
error_rate = cost(y, label)
print("iteration: %d, error rate: %.10f" % (i, error_rate))
row_sum = -y.sum(axis=1) # 按行相加 m x 1
row_sum = row_sum.repeat(k, axis=1) # 每个样本都需要除以总值, 所以转换为 m x k
# # 关于sum, repeat函数的用法
# weight = np.mat(np.ones((3, 2)))
# print(weight)
# # [[1. 1.]
# # [1. 1.]
# # [1. 1.]]
# weight = weight.sum(axis=1)
# print(weight)
# # [[2.]
# # [2.]
# # [2.]]
# weight = weight.repeat(3, axis=1)
# print(weight)
# # [[2. 2. 2.]
# # [2. 2. 2.]
# # [2. 2. 2.]]
y = y / row_sum # 得到-P(y|x,w)
for x in range(m):
y[x, label[x, 0]] += 1
weights = weights + (alpha / m) * feature.T * y
i += 1
return weights
def cost(err, label_data):
'''
计算损失函数值
input: err(mat):exp的值
label_data(mat):标签的值
output: sum_cost / m(float):损失函数的值
'''
m = np.shape(err)[0]
sum_cost = 0.0
for i in range(m):
if err[i, label_data[i, 0]] / np.sum(err[i, :]) > 0:
sum_cost -= np.log(err[i, label_data[i, 0]] / np.sum(err[i, :]))
else:
sum_cost -= 0
return sum_cost / m
def load_data(num, m):
'''
导入测试数据
input: num(int)生成的测试样本的个数
m(int)样本的维数
output: testDataSet(mat)生成测试样本
'''
testDataSet = np.mat(np.ones((num, m)))
for i in range(num):
testDataSet[i, 1] = rd.random() * 6 - 3#随机生成[-3,3]之间的随机数
testDataSet[i, 2] = rd.random() * 15#随机生成[0,15]之间是的随机数
return testDataSet
def predict(test_data, weights):
'''
利用训练好的Softmax模型对测试数据进行预测
input: test_data(mat)测试数据的特征
weights(mat)模型的权重
output: h.argmax(axis=1)所属的类别
'''
h = test_data * weights
print(h)
return h.argmax(axis=1) # 获得最大索引位置即标签
if __name__ == "__main__":
inputfile = "data.txt"
# 1、导入训练数据
feature, label = loadData(inputfile)
print(np.shape(feature), np.shape(label))
# x = []
# y = []
# for i in range(np.shape(feature)[0]):
# x.append(feature[i, 1])
# y.append(feature[i, 2])
# x = np.array(x)
# y = np.array(y)
# color = np.arctan2(y, x)
# # 绘制散点图
# plt.scatter(x, y, s = 75, c = color, alpha = 0.5)
# # 设置坐标轴范围
# plt.xlim((-5, 5))
# plt.ylim((-5, 5))
# # 不显示坐标轴的值
# plt.xticks(())
# plt.yticks(())
# plt.show()
# k = 4
# # 2、训练Softmax模型
# weights = train(feature, label, k, 10000, 0.4)
# print(weights)
# # 3. 预测
# m, n = np.shape(weights)
# data = load_data(4000, m)
# res = predict(data, weights)
# print(res)
| [
"33798487+YangXiaoo@users.noreply.github.com"
] | 33798487+YangXiaoo@users.noreply.github.com |
4d9848e2c6c4c0920d5eb8244bab8e9739ce5e48 | 82a9077bcb5a90d88e0a8be7f8627af4f0844434 | /google-cloud-sdk/lib/tests/unit/command_lib/kuberun/flags_test.py | be981d4155210013659894e9e3bedb5c88470db0 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | piotradamczyk5/gcloud_cli | 1ae2553595e569fad6ce84af62b91a7ee5489017 | 384ece11040caadcd64d51da74e0b8491dd22ca3 | refs/heads/master | 2023-01-01T23:00:27.858583 | 2020-10-21T04:21:23 | 2020-10-21T04:21:23 | 290,238,061 | 0 | 0 | null | 2020-10-19T16:43:36 | 2020-08-25T14:31:00 | Python | UTF-8 | Python | false | false | 5,681 | py | # -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for flags module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.command_lib.kuberun import flags
from tests.lib import test_case
import mock
class FlagsTest(test_case.TestCase):
  """Unit tests for the flag wrapper classes in the kuberun flags module."""

  def SetUp(self):
    # Fresh mocks per test; no real argparse parser or namespace is needed.
    self.parser = mock.Mock()
    self.args = mock.Mock()

  def testStringFlag_AddToParser(self):
    name, helptext = '--test-flag', 'some help text'
    flags.StringFlag(name, help=helptext).AddToParser(self.parser)
    self.parser.assert_has_calls(
        [mock.call.add_argument(name, help=helptext)])

  def testStringFlag_FormatFlags_present(self):
    expected = 'expected_test_value'
    self.args.test_flag = expected
    self.args.IsSpecified.return_value = True
    result = flags.StringFlag('--test-flag').FormatFlags(self.args)
    self.args.IsSpecified.assert_called_with('test_flag')
    self.assertListEqual(['--test-flag', expected], result)

  def testStringFlag_FormatFlags_missing(self):
    self.args.IsSpecified.return_value = False
    result = flags.StringFlag('--test-flag').FormatFlags(self.args)
    self.assertListEqual([], result)

  def testStringFlag_FormatFlags_coerceToString(self):
    # Non-string values are expected to be stringified in the output.
    number = 10
    self.args.test_flag = number
    self.args.IsSpecified.return_value = True
    result = flags.StringFlag('--test-flag').FormatFlags(self.args)
    self.args.IsSpecified.assert_called_with('test_flag')
    self.assertListEqual(['--test-flag', str(number)], result)

  def testBooleanFlag_AddToParser(self):
    name = '--test-boolean-flag'
    flags.BooleanFlag(name).AddToParser(self.parser)
    self.parser.assert_has_calls([
        mock.call.add_argument(
            name, action=arg_parsers.StoreTrueFalseAction)
    ])

  def testBooleanFlag_FormatFlags_present(self):
    name = '--test-boolean-flag'
    self.args.GetSpecifiedArgNames.return_value = [name]
    result = flags.BooleanFlag(name).FormatFlags(self.args)
    self.args.GetSpecifiedArgNames.assert_called()
    self.assertListEqual([name], result)

  def testBooleanFlag_FormatFlags_missing(self):
    # The negated form (--no-*) is what the user actually passed.
    name = '--test-boolean-flag'
    negated = '--no-test-boolean-flag'
    self.args.GetSpecifiedArgNames.return_value = [negated]
    result = flags.BooleanFlag(name).FormatFlags(self.args)
    self.args.GetSpecifiedArgNames.assert_called()
    self.assertListEqual([negated], result)

  def testBasicFlag_AddToParser(self):
    name = '--basic-flag'
    flags.BasicFlag(name).AddToParser(self.parser)
    self.parser.assert_has_calls(
        [mock.call.add_argument(name, default=False, action='store_true')])

  def testBasicFlag_FormatFlags_present(self):
    name = '--basic-flag'
    self.args.IsSpecified.return_value = True
    result = flags.BasicFlag(name).FormatFlags(self.args)
    self.args.IsSpecified.assert_called_with('basic_flag')
    self.assertListEqual([name], result)

  def testBasicFlag_FormatFlags_missing(self):
    name = '--basic-flag'
    self.args.IsSpecified.return_value = False
    result = flags.BasicFlag(name).FormatFlags(self.args)
    self.args.IsSpecified.assert_called_with('basic_flag')
    self.assertListEqual([], result)

  def testFlagGroup_AddToParser(self):
    # A group must forward AddToParser to every member flag.
    members = [mock.create_autospec(flags.BinaryCommandFlag)
               for _ in range(3)]
    flags.FlagGroup(*members).AddToParser(self.parser)
    for member in members:
      member.assert_has_calls([mock.call.AddToParser(self.parser)])

  def testFlagGroup_FormatFlags(self):
    # A group must concatenate each member's formatted flags, in order.
    members = [mock.create_autospec(flags.BinaryCommandFlag)
               for _ in range(3)]
    members[0].FormatFlags.return_value = ['--flag1']
    members[1].FormatFlags.return_value = ['--flag2', 'blah']
    members[2].FormatFlags.return_value = ['--no-flag3']
    result = flags.FlagGroup(*members).FormatFlags(self.args)
    for member in members:
      member.assert_has_calls([mock.call.FormatFlags(self.args)])
    self.assertListEqual(['--flag1', '--flag2', 'blah', '--no-flag3'], result)
# Allow running this test module directly via the gcloud test harness.
if __name__ == '__main__':
  test_case.main()
| [
"actions@github.com"
] | actions@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.