blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
87c0bb86ab7e49b7da4231abad6364ea302f122e
|
b5b1be6063901bd0bc97c3fbc2c26be1d02ce79e
|
/output/__init__.py
|
e9b0cb52d8081c4e39506efa55060fb586d86b77
|
[] |
no_license
|
dssg/rcra
|
ff566ff388596a733757e4de27631cbabdc7f15c
|
fcdd8f95c25902e46c55d85cbc4fe54196163f3d
|
refs/heads/master
| 2021-06-11T13:09:01.686456
| 2017-02-24T05:19:51
| 2017-02-24T05:19:51
| 61,817,642
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 252
|
py
|
from drain.data import FromSQL
# Facility lookup: rcra_id and zip_code with a deduplicated entity_id,
# resolved through dedupe.unique_map.
# NOTE(review): `tables` lists output.handler_names although the query does
# not reference it -- confirm whether that dependency is intentional.
facilities = FromSQL("""
select rcra_id, zip_code, dedupe_id as entity_id
from output.facilities
join dedupe.unique_map using (rcra_id)
""", tables=['output.facilities', 'output.handler_names'])
# target=True presumably marks this step's output for persistence by drain --
# confirm against the drain library's documentation.
facilities.target=True
|
[
"eric@k2co3.net"
] |
eric@k2co3.net
|
4a9e8aa14c6caaa64e388c73cf1955139791697f
|
3a771b72dae1aae406b94726bcbcf73915577b18
|
/q11.py
|
7957ae29604c51312fee4b0a13e0a5bfe42decff
|
[] |
no_license
|
SHANK885/Python-Basic-Programs
|
4fcb29280412baa63ffd33efba56d9f59770c9dc
|
157f0f871b31c4523b6873ce5dfe0d6e26a6dc61
|
refs/heads/master
| 2021-07-18T18:24:10.455282
| 2018-11-19T07:02:27
| 2018-11-19T07:02:27
| 138,009,231
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 584
|
py
|
'''
Question:
Write a program which accepts a sequence of comma separated 4 digit binary numbers as its input and then check whether they are divisible by 5 or not. The numbers that are divisible by 5 are to be printed in a comma separated sequence.
Example:
0100,0011,1010,1001
Then the output should be:
1010
Notes: Assume the data is input by console.
'''


def divisible_by_5(binary_csv):
    """Return the binary numbers from a comma-separated list divisible by 5.

    Args:
        binary_csv (str): e.g. "0100,0011,1010,1001".

    Returns:
        list[str]: the numbers (original spelling preserved) whose value is
        a multiple of 5, in input order.
    """
    # int(item, 2) parses each entry as base 2; the original text is kept
    # so the output format matches the input format.
    return [item for item in binary_csv.split(",") if int(item, 2) % 5 == 0]


def main():
    """Read the numbers from the console and print the divisible ones."""
    binary = input("Enter comma separated 4 digit binary number : ")
    print(",".join(divisible_by_5(binary)))


# Guarding the entry point keeps the prompt from firing on import.
if __name__ == '__main__':
    main()
|
[
"shashankshekhar885@gmail.com"
] |
shashankshekhar885@gmail.com
|
4b817c90da1a1bf413b75b098e4e7aced20b4cdb
|
8034442a9778043b1d886220a3c928327b6297d4
|
/Case_rbm/vlan_bond/index.py
|
2225ed51c99d287e815f12237d49525615f04bb8
|
[] |
no_license
|
wangqian0818/auto_test
|
5efe6d7b41ff01e6a9f10211674f55e195484a1c
|
803a485d9720f090f7fa5d4482092cc4e7d9aa73
|
refs/heads/master
| 2023-08-24T01:27:40.956398
| 2021-11-02T02:12:14
| 2021-11-02T02:12:14
| 367,355,734
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,056
|
py
|
#coding:utf-8
from common import baseinfo

# Gateway card id and the two VLAN ids under test, read from the shared
# baseinfo configuration.
vlanCardid = str(baseinfo.gwVlanCardid)
vlanA = str(baseinfo.vlanA)
vlanB = str(baseinfo.vlanB)

# Configuration push.
# Each step list holds, in order: the set command, the query command, and
# the expected result of the query.
case1_step1={
    "step1":[f"export cardid={vlanCardid}&&switch-jsac --set --module 12 --switch on",f"export cardid={vlanCardid}&&switch-jsac --get | grep 12","on"],
    "step2":[f"export cardid={vlanCardid}&&switch-jsac --set --module 15 --switch on",f"export cardid={vlanCardid}&&switch-jsac --get | grep 15","on"]
}
# Verification: each step list here holds a query command and its expected
# result (presumably both VLANs appear and the table has 5 lines -- confirm
# the expected line count against the device output format).
case1_step2={
    "step1":[f"export cardid={vlanCardid}&&vlan-jsac --get",vlanA],
    "step2":[f"export cardid={vlanCardid}&&vlan-jsac --get",vlanB],
    "step3":[f"export cardid={vlanCardid}&&vlan-jsac --get |wc -l",'5']
}
# Teardown: switch modules 12 and 15 back off and confirm the state.
case1_step11={
    "step1":[f"export cardid={vlanCardid}&&switch-jsac --set --module 12 --switch off",f"export cardid={vlanCardid}&&switch-jsac --get | grep 12","off"],
    "step2":[f"export cardid={vlanCardid}&&switch-jsac --set --module 15 --switch off",f"export cardid={vlanCardid}&&switch-jsac --get | grep 15","off"]
}
|
[
"wangqianjob0818@163.com"
] |
wangqianjob0818@163.com
|
beb8b556b8292e3e60a49a4dd5625d013750d1d7
|
aa480d8b09dd7ad92c37c816ebcace24a35eb34c
|
/third-round/43.字符串相乘.py
|
cf3fd3f45529613e3a133392c58f55c8d88caa5a
|
[] |
no_license
|
SR2k/leetcode
|
7e701a0e99f9f05b21216f36d2f5ac07a079b97f
|
de131226159865dcb7b67e49a58d2ddc3f0a82c7
|
refs/heads/master
| 2023-03-18T03:37:02.916453
| 2022-09-16T01:28:13
| 2022-09-16T01:28:13
| 182,083,445
| 0
| 0
| null | 2023-03-08T05:44:26
| 2019-04-18T12:27:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,634
|
py
|
#
# @lc app=leetcode.cn id=43 lang=python3
#
# [43] 字符串相乘
#
# https://leetcode-cn.com/problems/multiply-strings/description/
#
# algorithms
# Medium (44.96%)
# Likes: 862
# Dislikes: 0
# Total Accepted: 205.5K
# Total Submissions: 457.8K
# Testcase Example: '"2"\n"3"'
#
# 给定两个以字符串形式表示的非负整数 num1 和 num2,返回 num1 和 num2 的乘积,它们的乘积也表示为字符串形式。
#
# 注意:不能使用任何内置的 BigInteger 库或直接将输入转换为整数。
#
#
#
# 示例 1:
#
#
# 输入: num1 = "2", num2 = "3"
# 输出: "6"
#
# 示例 2:
#
#
# 输入: num1 = "123", num2 = "456"
# 输出: "56088"
#
#
#
# 提示:
#
#
# 1 <= num1.length, num2.length <= 200
# num1 和 num2 只能由数字组成。
# num1 和 num2 都不包含任何前导零,除了数字0本身。
#
#
#
# @lc code=start
class Solution:
    def multiply(self, num1: str, num2: str) -> str:
        """Multiply two non-negative integers given as decimal strings.

        Uses grade-school multiplication on a little-endian digit buffer,
        never converting the full inputs to int.
        """
        if '0' in (num1, num2):
            return '0'
        # digits[k] accumulates raw (possibly >9) products of digit pairs
        # whose positions sum to k; length m+n always suffices.
        digits = [0] * (len(num1) + len(num2))
        for i, ch1 in enumerate(reversed(num1)):
            d1 = ord(ch1) - ord('0')
            for j, ch2 in enumerate(reversed(num2)):
                digits[i + j] += d1 * (ord(ch2) - ord('0'))
        # Normalize: propagate carries and stringify each digit.
        carry = 0
        out = []
        for raw in digits:
            carry, digit = divmod(raw + carry, 10)
            out.append(str(digit))
        # Drop leading zeros (trailing in little-endian order); the product
        # is nonzero here, so the loop cannot empty the list.
        while out[-1] == '0':
            out.pop()
        return "".join(reversed(out))
# @lc code=end
# Manual smoke checks: expected output "6" then "56088".
print(Solution().multiply("2", "3"))
print(Solution().multiply("123", "456"))
|
[
"luozhou.csy@alibaba-inc.com"
] |
luozhou.csy@alibaba-inc.com
|
1edb79a9fc5cdd76785d4f5fbdf777056346feff
|
2bcc421ee345b00cf805c543b37d18b5d019dc04
|
/adafruit-circuitpython-bundle-6.x-mpy-20201126/examples/adafruit_io_simpletest.py
|
13f48ce77609ae495c9aae8bea5cbbb6b5a5fc34
|
[] |
no_license
|
saewoonam/sc-current-source-titano
|
5a1ad46889c1b09c168424901fd71cb4eab5c61b
|
1c136aa8b61268d9ac0b5a682b30ece70ab87663
|
refs/heads/main
| 2023-03-02T22:12:26.685537
| 2021-02-09T03:28:01
| 2021-02-09T03:28:01
| 317,299,900
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,684
|
py
|
# Example of using the Adafruit IO CircuitPython MQTT client
# to subscribe to an Adafruit IO feed and publish random data
# to be received by the feed.
#
# Example by Tony DiCola for Adafruit Industries
# Modified by Brent Rubell for Adafruit Industries, 2019
import time
from random import randint
import board
import busio
from digitalio import DigitalInOut
from adafruit_esp32spi import adafruit_esp32spi
from adafruit_esp32spi import adafruit_esp32spi_wifimanager
import adafruit_esp32spi.adafruit_esp32spi_socket as socket
import neopixel
import adafruit_minimqtt.adafruit_minimqtt as MQTT
from adafruit_io.adafruit_io import IO_MQTT
### WiFi ###
# Get wifi details and more from a secrets.py file
try:
    from secrets import secrets
except ImportError:
    print("WiFi secrets are kept in secrets.py, please add them there!")
    raise

# If you are using a board with pre-defined ESP32 Pins:
esp32_cs = DigitalInOut(board.ESP_CS)
esp32_ready = DigitalInOut(board.ESP_BUSY)
esp32_reset = DigitalInOut(board.ESP_RESET)

# If you have an externally connected ESP32:
# esp32_cs = DigitalInOut(board.D9)
# esp32_ready = DigitalInOut(board.D10)
# esp32_reset = DigitalInOut(board.D5)

# SPI bus shared with the ESP32 WiFi co-processor.
spi = busio.SPI(board.SCK, board.MOSI, board.MISO)
esp = adafruit_esp32spi.ESP_SPIcontrol(spi, esp32_cs, esp32_ready, esp32_reset)

"""Use below for Most Boards"""
status_light = neopixel.NeoPixel(
    board.NEOPIXEL, 1, brightness=0.2
)  # Uncomment for Most Boards
"""Uncomment below for ItsyBitsy M4"""
# status_light = dotstar.DotStar(board.APA102_SCK, board.APA102_MOSI, 1, brightness=0.2)
# Uncomment below for an externally defined RGB LED
# import adafruit_rgbled
# from adafruit_esp32spi import PWMOut
# RED_LED = PWMOut.PWMOut(esp, 26)
# GREEN_LED = PWMOut.PWMOut(esp, 27)
# BLUE_LED = PWMOut.PWMOut(esp, 25)
# status_light = adafruit_rgbled.RGBLED(RED_LED, BLUE_LED, GREEN_LED)

# The wifi manager handles connect/reconnect and drives the status light.
wifi = adafruit_esp32spi_wifimanager.ESPSPI_WiFiManager(esp, secrets, status_light)
# Define callback functions which will be called when certain events happen.
# pylint: disable=unused-argument
def connected(client):
    """Handle a successful Adafruit IO connection.

    The client argument is the Adafruit IO MQTT client, so calls can be made
    against it directly; here we subscribe to the DemoFeed feed.
    """
    print("Connected to Adafruit IO! Listening for DemoFeed changes...")
    client.subscribe("DemoFeed")
def subscribe(client, userdata, topic, granted_qos):
    """Log a new feed subscription and the QoS level it was granted."""
    notice = "Subscribed to {0} with QOS level {1}".format(topic, granted_qos)
    print(notice)
def unsubscribe(client, userdata, topic, pid):
    """Log that the client unsubscribed from a feed."""
    notice = "Unsubscribed from {0} with PID {1}".format(topic, pid)
    print(notice)
# pylint: disable=unused-argument
def disconnected(client):
    """Report that the connection to Adafruit IO was dropped."""
    print("Disconnected from Adafruit IO!")
# pylint: disable=unused-argument
def message(client, feed_id, payload):
    """Report a new value arriving on a subscribed feed.

    feed_id identifies the feed and payload carries the new value.
    """
    print("Feed {0} received new value: {1}".format(feed_id, payload))
# Connect to WiFi
print("Connecting to WiFi...")
wifi.connect()
print("Connected!")

# Initialize MQTT interface with the esp interface
MQTT.set_socket(socket, esp)

# Initialize a new MQTT Client object
mqtt_client = MQTT.MQTT(
    broker="io.adafruit.com",
    username=secrets["aio_username"],
    password=secrets["aio_key"],
)

# Initialize an Adafruit IO MQTT Client
io = IO_MQTT(mqtt_client)

# Connect the callback methods defined above to Adafruit IO
io.on_connect = connected
io.on_disconnect = disconnected
io.on_subscribe = subscribe
io.on_unsubscribe = unsubscribe
io.on_message = message

# Connect to Adafruit IO
print("Connecting to Adafruit IO...")
io.connect()

# Below is an example of manually publishing a new value to Adafruit IO.
last = 0
print("Publishing a new message every 10 seconds...")
while True:
    # Explicitly pump the message loop.
    io.loop()
    # Send a new message every 10 seconds.
    # NOTE(review): the messages above say "every 10 seconds" but the check
    # below uses a 5-second interval -- confirm which was intended.
    if (time.monotonic() - last) >= 5:
        value = randint(0, 100)
        print("Publishing {0} to DemoFeed.".format(value))
        io.publish("DemoFeed", value)
        last = time.monotonic()
|
[
"nams@nist.gov"
] |
nams@nist.gov
|
26903997659e0a6ffeafaf3ae4e966b68f912e5f
|
a9e3f3ad54ade49c19973707d2beb49f64490efd
|
/Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/cms/djangoapps/contentstore/management/commands/update_course_outline.py
|
b3ba3bd289199b663c7d1951a01790bf3d31bc50
|
[
"MIT",
"AGPL-3.0-only",
"AGPL-3.0-or-later"
] |
permissive
|
luque/better-ways-of-thinking-about-software
|
8c3dda94e119f0f96edbfe5ba60ca6ec3f5f625d
|
5809eaca7079a15ee56b0b7fcfea425337046c97
|
refs/heads/master
| 2021-11-24T15:10:09.785252
| 2021-11-22T12:14:34
| 2021-11-22T12:14:34
| 163,850,454
| 3
| 1
|
MIT
| 2021-11-22T12:12:31
| 2019-01-02T14:21:30
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 867
|
py
|
"""
Management command to create the course outline for a course. This is done
automatically when Studio publishes a course, but this command can be used to
do it manually for debugging, error recovery, or backfilling purposes.
Should be invoked from the Studio process.
"""
from django.core.management.base import BaseCommand
from opaque_keys.edx.keys import CourseKey
from ...tasks import update_outline_from_modulestore
class Command(BaseCommand):
    """
    Invoke with:

        python manage.py cms update_course_outline <course_key>
    """
    help = "Updates a single course outline based on modulestore content."

    def add_arguments(self, parser):
        # The serialized course key, e.g. "course-v1:Org+Course+Run".
        parser.add_argument('course_key')

    def handle(self, *args, **options):
        # Parse the key and delegate the rebuild to the shared task helper.
        update_outline_from_modulestore(
            CourseKey.from_string(options['course_key']))
|
[
"rafael.luque@osoco.es"
] |
rafael.luque@osoco.es
|
112fe187347b14db8e486b104480e002a756dd8c
|
7ae32748fb910d2542e35c57543fc89f98cd2b1d
|
/tests/test_lib.py
|
e9e421020e8b554caa7f433988afc2ac71c66236
|
[
"Apache-2.0"
] |
permissive
|
sanjaymsh/dtfabric
|
451c87d987f438fccfbb999079d2f55d01650b68
|
9e216f90b70d8a3074b2125033e0773e3e482355
|
refs/heads/master
| 2022-12-19T09:13:02.370724
| 2020-09-27T05:11:25
| 2020-09-27T05:11:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,535
|
py
|
# -*- coding: utf-8 -*-
"""Shared test case."""
from __future__ import unicode_literals
import os
import sys
import unittest
from dtfabric import reader
from dtfabric import registry
def skipUnlessHasTestFile(path_segments):  # pylint: disable=invalid-name
    """Decorator to skip a test if the test file does not exist.

    Args:
        path_segments (list[str]): path segments inside the test data
            directory.

    Returns:
        function: decorator to invoke (identity when the file exists or the
            fail-instead-of-skip flag is set on unittest).
    """
    always_run = getattr(unittest, 'fail_unless_has_test_file', False)
    test_path = os.path.join('test_data', *path_segments)
    if always_run or os.path.exists(test_path):
        return lambda function: function

    # On Python 2 the skip message must be a byte string.
    if sys.version_info[0] < 3:
        test_path = test_path.encode('utf-8')
    return unittest.skip('missing test file: {0:s}'.format(test_path))
class BaseTestCase(unittest.TestCase):
  """Shared functionality for the dtfabric test cases."""

  _TEST_DATA_PATH = os.path.join(os.getcwd(), 'test_data')

  # Show full diff results; part of TestCase so does not follow our naming
  # conventions.
  maxDiff = None

  def _CreateDefinitionRegistryFromFile(self, path):
    """Creates a data type definition registry from a file.

    Args:
      path (str): path to the data definition file.

    Returns:
      DataTypeDefinitionsRegistry: data type definition registry or None
          on error.
    """
    data_type_registry = registry.DataTypeDefinitionsRegistry()
    self._FillDefinitionRegistryFromFile(data_type_registry, path)
    return data_type_registry

  def _FillDefinitionRegistryFromFile(self, definitions_registry, path):
    """Fills a data type definition registry from a file.

    Args:
      definitions_registry (DataTypeDefinitionsRegistry): data type
          definitions registry.
      path (str): path to the data definition file.
    """
    yaml_reader = reader.YAMLDataTypeDefinitionsFileReader()
    with open(path, 'rb') as file_object:
      yaml_reader.ReadFileObject(definitions_registry, file_object)

  def _GetTestFilePath(self, path_segments):
    """Retrieves the path of a test file in the test data directory.

    Args:
      path_segments (list[str]): path segments inside the test data
          directory.

    Returns:
      str: path of the test file.
    """
    # os.path.join takes individual segments, hence the unpacking.
    return os.path.join(self._TEST_DATA_PATH, *path_segments)
|
[
"joachim.metz@gmail.com"
] |
joachim.metz@gmail.com
|
7ff5ace33b7b5f94bd27e78e54a51bb4adfe7e97
|
e58fcc1467ad81084b016d2a48d672d75da2c058
|
/rdkit/Code/DataStructs/Wrap/testSparseIntVect.py
|
3cc02547f6b0be4e87ec009699478e2eb5f412f7
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
ali1810/sol_heroku
|
294375d70c656452749e959bfb851a50defc0e01
|
97b548ce7d864e6fed936c53b790c1dc8038cff2
|
refs/heads/main
| 2023-08-15T06:18:26.933254
| 2021-09-14T10:20:19
| 2021-09-14T10:20:19
| 405,223,280
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,585
|
py
|
# $Id$
#
# Copyright (C) 2007,2008 Greg Landrum
#
# @@ All Rights Reserved @@
#
import os, sys
import io
import unittest
import pickle
from rdkit import RDConfig
from rdkit import DataStructs as ds
import random
def feq(v1, v2, tol=1e-4):
  """Approximate float equality: True when |v1 - v2| < tol."""
  difference = abs(v1 - v2)
  return difference < tol
class TestCase(unittest.TestCase):
  """Tests for the IntSparseIntVect/LongSparseIntVect Python wrappers."""

  def setUp(self):
    pass

  def test1Int(self):
    """Basic get/set, length, equality and OR semantics of IntSparseIntVect."""
    v1 = ds.IntSparseIntVect(5)
    # Out-of-range access raises IndexError.
    self.assertRaises(IndexError, lambda: v1[5])
    v1[0] = 1
    v1[2] = 2
    v1[3] = 3
    self.assertTrue(v1 == v1)
    self.assertTrue(v1.GetLength() == 5)
    v2 = ds.IntSparseIntVect(5)
    self.assertTrue(v1 != v2)
    v2 |= v1
    self.assertTrue(v2 == v1)
    v3 = v2 | v1
    self.assertTrue(v3 == v1)
    onVs = v1.GetNonzeroElements()
    self.assertTrue(onVs == {0: 1, 2: 2, 3: 3})

  def test2Long(self):
    """Same checks as test1Int for LongSparseIntVect with a 2**42 length."""
    l = 1 << 42
    v1 = ds.LongSparseIntVect(l)
    self.assertRaises(IndexError, lambda: v1[l])
    v1[0] = 1
    v1[2] = 2
    v1[1 << 35] = 3
    self.assertTrue(v1 == v1)
    self.assertTrue(v1.GetLength() == l)
    v2 = ds.LongSparseIntVect(l)
    self.assertTrue(v1 != v2)
    v2 |= v1
    self.assertTrue(v2 == v1)
    v3 = v2 | v1
    self.assertTrue(v3 == v1)
    onVs = v1.GetNonzeroElements()
    self.assertTrue(onVs == {0: 1, 2: 2, 1 << 35: 3})

  def test3Pickle1(self):
    """Round-trips a LongSparseIntVect through pickle, ToBinary and a fixture."""
    l = 1 << 42
    v1 = ds.LongSparseIntVect(l)
    self.assertRaises(IndexError, lambda: v1[l + 1])
    v1[0] = 1
    v1[2] = 2
    v1[1 << 35] = 3
    self.assertTrue(v1 == v1)
    v2 = pickle.loads(pickle.dumps(v1))
    self.assertTrue(v2 == v1)
    v3 = ds.LongSparseIntVect(v2.ToBinary())
    self.assertTrue(v2 == v3)
    self.assertTrue(v1 == v3)
    # How the fixture was generated (py2-era):
    #pickle.dump(v1,file('lsiv.pkl','wb+'))
    # Read as text to normalize line endings, then unpickle from bytes.
    with open(os.path.join(RDConfig.RDBaseDir, 'Code/DataStructs/Wrap/testData/lsiv.pkl'),
              'r') as tf:
      buf = tf.read().replace('\r\n', '\n').encode('utf-8')
      tf.close()  # NOTE(review): redundant inside `with`, but harmless.
    with io.BytesIO(buf) as f:
      v3 = pickle.load(f)
    self.assertTrue(v3 == v1)

  def test3Pickle2(self):
    """Same pickle/ToBinary/fixture round-trip for IntSparseIntVect."""
    l = 1 << 21
    v1 = ds.IntSparseIntVect(l)
    self.assertRaises(IndexError, lambda: v1[l + 1])
    v1[0] = 1
    v1[2] = 2
    v1[1 << 12] = 3
    self.assertTrue(v1 == v1)
    v2 = pickle.loads(pickle.dumps(v1))
    self.assertTrue(v2 == v1)
    v3 = ds.IntSparseIntVect(v2.ToBinary())
    self.assertTrue(v2 == v3)
    self.assertTrue(v1 == v3)
    # How the fixture was generated (py2-era):
    #pickle.dump(v1,file('isiv.pkl','wb+'))
    with open(os.path.join(RDConfig.RDBaseDir, 'Code/DataStructs/Wrap/testData/isiv.pkl'),
              'r') as tf:
      buf = tf.read().replace('\r\n', '\n').encode('utf-8')
      tf.close()  # NOTE(review): redundant inside `with`, but harmless.
    with io.BytesIO(buf) as f:
      v3 = pickle.load(f)
    self.assertTrue(v3 == v1)

  def test4Update(self):
    """UpdateFromSequence increments the count once per occurrence."""
    v1 = ds.IntSparseIntVect(5)
    self.assertRaises(IndexError, lambda: v1[6])
    v1[0] = 1
    v1[2] = 2
    v1[3] = 3
    self.assertTrue(v1 == v1)
    v2 = ds.IntSparseIntVect(5)
    v2.UpdateFromSequence((0, 2, 3, 3, 2, 3))
    self.assertTrue(v1 == v2)

  def test5Dice(self):
    """DiceSimilarity: identity is 1.0; hand-computed value for two vectors."""
    v1 = ds.IntSparseIntVect(5)
    v1[4] = 4
    v1[0] = 2
    v1[3] = 1
    self.assertTrue(feq(ds.DiceSimilarity(v1, v1), 1.0))
    v1 = ds.IntSparseIntVect(5)
    v1[0] = 2
    v1[2] = 1
    v1[3] = 4
    v1[4] = 6
    v2 = ds.IntSparseIntVect(5)
    v2[1] = 2
    v2[2] = 3
    v2[3] = 4
    v2[4] = 4
    # Expected value worked out by hand; the metric is symmetric.
    self.assertTrue(feq(ds.DiceSimilarity(v1, v2), 18.0 / 26.))
    self.assertTrue(feq(ds.DiceSimilarity(v2, v1), 18.0 / 26.))

  def test6BulkDice(self):
    """BulkDiceSimilarity matches element-wise DiceSimilarity on random vectors."""
    sz = 10
    nToSet = 5
    nVs = 6
    import random
    vs = []
    for i in range(nVs):
      v = ds.IntSparseIntVect(sz)
      for j in range(nToSet):
        v[random.randint(0, sz - 1)] = random.randint(1, 10)
      vs.append(v)
    baseDs = [ds.DiceSimilarity(vs[0], vs[x]) for x in range(1, nVs)]
    bulkDs = ds.BulkDiceSimilarity(vs[0], vs[1:])
    for i in range(len(baseDs)):
      self.assertTrue(feq(baseDs[i], bulkDs[i]))

  def test6BulkTversky(self):
    """Tversky(.5,.5) equals Dice and Tversky(1,1) equals Tanimoto, bulk and single."""
    sz = 10
    nToSet = 5
    nVs = 6
    import random
    vs = []
    for i in range(nVs):
      v = ds.IntSparseIntVect(sz)
      for j in range(nToSet):
        v[random.randint(0, sz - 1)] = random.randint(1, 10)
      vs.append(v)
    baseDs = [ds.TverskySimilarity(vs[0], vs[x], .5, .5) for x in range(1, nVs)]
    bulkDs = ds.BulkTverskySimilarity(vs[0], vs[1:], 0.5, 0.5)
    diceDs = [ds.DiceSimilarity(vs[0], vs[x]) for x in range(1, nVs)]
    for i in range(len(baseDs)):
      self.assertTrue(feq(baseDs[i], bulkDs[i]))
      self.assertTrue(feq(baseDs[i], diceDs[i]))
    bulkDs = ds.BulkTverskySimilarity(vs[0], vs[1:], 1.0, 1.0)
    taniDs = [ds.TanimotoSimilarity(vs[0], vs[x]) for x in range(1, nVs)]
    for i in range(len(bulkDs)):
      self.assertTrue(feq(bulkDs[i], taniDs[i]))
    taniDs = ds.BulkTanimotoSimilarity(vs[0], vs[1:])
    for i in range(len(bulkDs)):
      self.assertTrue(feq(bulkDs[i], taniDs[i]))

  def test7ToList(self):
    """list(vect) and vect.ToList() agree with a plain Python list."""
    l = [0] * 2048
    nbits = 2048
    bv = ds.IntSparseIntVect(nbits)
    for j in range(nbits):
      x = random.randrange(0, nbits)
      l[x] = x
      bv[x] = x
    l2 = list(bv)
    l3 = bv.ToList()
    self.assertEqual(l, l2)
    self.assertEqual(l, l3)
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
|
[
"noreply@github.com"
] |
ali1810.noreply@github.com
|
f37b4202698b801244e4f37eb349143a2286421f
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/070_oop/007_exceptions/_exercises/templates/GoCongr/035_warnings.py
|
64f70487a6f6966148382366c83ea4f50b5aa248
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 682
|
py
|
# # w____
# ________ w____
#
#
# ___ input_body_parameter name unit supposed_maximum
# parameter _ fl.. inp.. 'Enter your @ (in @): '.f.... n.. u...
# __ ? < _ 0:
# r____ V... n.. + ' cannot be negative')
# __ ? > s...
# w____.w... 'suspiciously large value of ' + n..
# r_ ?
#
#
# ___ input_mass
# r_ i... n... _'mass' u... _'kg' s.... _ 100
#
#
# ___ input_height
# r_ i... n.. _ 'height' u... _ 'm' s.... _ 2
#
#
# ___ calculate_bmi mass height
# r_ m... / h.. ** 2)
#
#
# ___ main
# mass _ i._m.
# height _ i._h.
# bmi _ c... mass height
# print('Your body mass index is', ?
#
#
# __ _______ __ ____
# ?
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
b0084b0539780db5582ce0d7f2cdd843f26384e9
|
6defeaa9e3eff61cd861c855ed2f65db2a457564
|
/onmt/keyphrase/shrink_pred_files.py
|
b0f94a88ab0e0f5a110485f683a9c904dd885b63
|
[
"MIT"
] |
permissive
|
memray/OpenNMT-kpg-release
|
50439d2a58d4499b3a4b1d1fdb586d266c4367e7
|
d16bf09e21521a6854ff3c7fe6eb271412914960
|
refs/heads/master
| 2023-08-17T14:32:04.442881
| 2023-01-31T03:24:46
| 2023-01-31T03:24:46
| 213,238,221
| 222
| 34
|
MIT
| 2023-07-22T18:03:01
| 2019-10-06T20:23:17
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 6,961
|
py
|
# -*- coding: utf-8 -*-
"""
Some pred files use up too much space, e.g. /zfs1/pbrusilovsky/rum20/kp/OpenNMT-kpg/output/keyphrase/meng17-one2seq/meng17-one2seq-kp20k-topmodels/meng17-one2seq-fullbeam/meng17-one2seq-beam50-maxlen40/pred/kp20k-meng17-verbatim_prepend-rnn-BS64-LR0.05-Layer1-Dim150-Emb100-Dropout0.0-Copytrue-Reusetrue-Covtrue-PEfalse-Contboth-IF1_step_95000/kp20k.pred is 8.3GB, beam=10 size=2.0GB.
So this
"""
import json
import os
__author__ = "Rui Meng"
__email__ = "rui.meng@pitt.edu"

if __name__ == '__main__':
    # Experiment root to clean. Earlier roots (OpenNMT-kpg outputs, transfer
    # experiments, ...) were hard-coded here and swapped by editing this line.
    root_path = '/zfs1/hdaqing/rum20/kp/transfer_exps/bart_DAFT-v1-DA1e6_FT1e5'
    print(root_path)

    # Expected number of prediction lines per test set. A .pred file is only
    # shrunk once it has exactly this many lines, i.e. decoding has finished.
    dataset_line_counts = {
        'kp20k': 19987,
        # 'kp20k_valid2k': 2000,
        'inspec': 500,
        'krapivin': 460,
        'nus': 211,
        'semeval': 100,
        # 'duc': 308,
        'kp20k_test': 19987,
        'openkp_test': 6614,
        'kptimes_test': 10000,
        'jptimes_test': 10000,
        'stackex_test': 16000,
        'kp20k_valid2k_test': 2000,
        'openkp_valid2k_test': 2000,
        'kptimes_valid2k_test': 2000,
        'stackex_valid2k_test': 2000,
    }

    total_size_shrinked = 0  # megabytes reclaimed so far
    for root, dirs, files in os.walk(root_path, topdown=True):
        for filename in files:
            # --- delete bulky .report files for known datasets ------------
            if filename.endswith('.report'):
                dataset_name = filename[:-7].split('-')[-1][5:]
                if dataset_name in dataset_line_counts:
                    report_path = os.path.join(root, filename)
                    print('Deleting .report: [%s] %s' % (dataset_name, report_path))
                    ori_size = os.stat(report_path).st_size // 1024 // 1024
                    print('\t file size = %d MB' % (ori_size))
                    total_size_shrinked += ori_size
                    os.remove(report_path)
            if filename.endswith('.report.txt'):
                dataset_name = filename[:-11]
                if dataset_name in dataset_line_counts:
                    report_path = os.path.join(root, filename)
                    print('Deleting .report: [%s] %s' % (dataset_name, report_path))
                    ori_size = os.stat(report_path).st_size // 1024 // 1024
                    print('\t file size = %d MB' % (ori_size))
                    total_size_shrinked += ori_size
                    os.remove(report_path)

            # --- shrink completed .pred files -----------------------------
            if not filename.endswith('.pred'):
                continue
            dataset_name = filename[:-5].split('-')[-1][5:]
            if dataset_name not in dataset_line_counts:
                continue
            pred_path = os.path.join(root, filename)
            print('Shrinking .pred: [%s] %s' % (dataset_name, pred_path))
            ori_size = os.stat(pred_path).st_size // 1024 // 1024
            print('\t file size = %d MB' % (ori_size))

            # Count lines (keeping only the first line's content) to check
            # the prediction run is complete before touching the file.
            with open(pred_path, 'r') as pred_file:
                lines = [l if lid == 0 else '' for lid, l in enumerate(pred_file)]
            if len(lines) != dataset_line_counts[dataset_name]:
                # Prediction still ongoing, skip.
                continue
            pred_dict = json.loads(lines[0])
            if 'attns' not in pred_dict:
                # Not a raw model-output file.
                continue
            if pred_dict['src'] is None:
                # Already shrunk on a previous run, skip.
                continue

            tmp_pred_path = pred_path + '.tmp'
            # BUG FIX: write through a `with` block so the temp file is
            # flushed and closed before os.stat/os.rename -- the original
            # left it open (its close() call was commented out), which could
            # report a wrong new size and lose buffered output.
            with open(tmp_pred_path, 'w') as tmp_pred_file, \
                    open(pred_path, 'r') as pred_file:
                for lid, line in enumerate(pred_file):
                    try:
                        pred_dict = json.loads(line)
                    except json.JSONDecodeError:
                        # Keep the unparseable line as-is and report it.
                        tmp_pred_file.write(line.strip() + '\n')
                        print("Error occurs while loading line %d in %s" % (lid, pred_path))
                        continue
                    # Null out the bulky fields; scores and final predictions
                    # needed for evaluation are kept.
                    for key in ('src', 'preds', 'attns', 'copied_flags',
                                'ori_pred_sents', 'ori_pred_scores',
                                'ori_preds', 'dup_pred_tuples'):
                        pred_dict[key] = None
                    tmp_pred_file.write(json.dumps(pred_dict) + '\n')
            print('\tDumped to: ' + pred_path + '.tmp')
            new_size = os.stat(tmp_pred_path).st_size // 1024 // 1024
            print('\t new file size = %d MB' % (new_size))
            print('\t reduced size = %d MB' % (ori_size - new_size))
            total_size_shrinked += (ori_size - new_size)
            # Replace the original file to release the space.
            os.remove(pred_path)
            os.rename(tmp_pred_path, pred_path)
    print('Total shrinked size = %d MB' % (total_size_shrinked))
|
[
"memray0@gmail.com"
] |
memray0@gmail.com
|
d4a331fd126d3de9e4c2126c8127d132a767d784
|
501176c17ecfda9fc2641c407b044b51364afa8e
|
/BootCamp/python/example/example.py
|
c4a02a67d17d6beae597df85db0c307a24e907bd
|
[] |
no_license
|
melissa-koi/betterbuys
|
fcc6d6bfc1f37a644258d7bcf52eb93597674fd6
|
edc40636c14ee341835bd8f77fd9ae91767b220a
|
refs/heads/main
| 2023-05-26T13:59:59.503289
| 2021-06-10T05:35:43
| 2021-06-10T05:35:43
| 375,577,831
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,146
|
py
|
# import sys
#
# name = sys.argv[1]
# print("How old are you?")
# age = int(input())
#
# print(name)
# print(age)
# height = 69
# if height > 70:
# print("You are really tall")
# elif height > 60:
# print("You are of average height")
# else:
# print("You are really short")
# name = ""
# list_a = []
#
# if list_a:
# print("True")
# else:
# print("False")
# list_a = range(0, 5)
# print(list_a)
# for i in range(0, 7):
# print("I would love " + str(i) + " cookies")
# numbers = [1, 2, 3, 4, 5]
# for i in numbers:
# if i % 2 == 0:
# print(i)
# players = 11
# while players >= 5:
# print("The remaining players are ", players)
# players -= 1
# number = 0
# while True:
# print("I love candy " + str(number))
# number += 1
# if number == 7:
# break
# numTaken = [3, 5, 7, 11, 13]
# print("Available Numbers")
#
# for i in range(1, 21):
# if i in numTaken:
# continue # or break
# print(i)
# my_list = []
# my_other_list = list()
# list_a = ["a", "b", "c", "d"] # list of strings
# list_b = [1, 2, 3, 4, 5, 6] # list of numbers
# list_c = [1, "west", 34, "longitude"] # mixed list
# list_d = [ ["a","b","c","d"],[1,2,3,4,5,6],[1,"west",34,"longitude"]] # nested list
#
# list_a.extend(list_b)
# print(list_a)
# print(list_b)
# my_cat = {'name': 'Mr.Sniffles', 'age': 18, 'color': 'black'}
#
# print(my_cat['name'])
# print(my_cat)
#
# print(list(my_cat.keys()))
# print("Enter a string")
# input_string = input()
# characters = {}
#
# for character in input_string:
# characters.setdefault(character, 0)
# characters[character] = characters[character] + 1
#
# print(characters)
# print('What is your name?')
# name = input()
# print('How old are you?')
# age = input()
# print(f"My name is {name} and i am {age} years old")
# name = "James"
# age = 19
# weight = '79' # Kilograms
#
# age_weight_ratio = int(weight)/age
# age_weight_ratio2 = float(weight)/age
#
# print(age_weight_ratio)
# print(age_weight_ratio2)
def fun_a(a=1, b=4):
    """Print the sum of the two arguments (defaults give 5)."""
    result = a + b
    print(result)


fun_a()
def fun_b():
    """Placeholder function; intentionally does nothing."""
    return None
def fun_c(a, b):
    """Return the sum of a and b."""
    return a + b


# Renamed from `sum`, which shadowed the builtin of the same name.
total = fun_c(5, 8)
print(total)
|
[
"melissawangui3@gmail.com"
] |
melissawangui3@gmail.com
|
2102df986d73ba8bded087840712c105503e1d9e
|
1e660c91d0ae300ad6907a97941441fc8e73d5dc
|
/api/models/mixins.py
|
aa77a920c2b5911c3ee17653ec4e9346cb85c4ce
|
[] |
no_license
|
SEUNAGBEYE/Stocky
|
55d65e8ba7e7ff5228863e3c242c6499b2078ca7
|
b2129b0a166a08d14c809cf4e0d711a7c469c91c
|
refs/heads/develop
| 2023-02-23T11:26:46.160005
| 2019-04-01T04:11:06
| 2019-04-01T04:11:06
| 178,017,757
| 0
| 0
| null | 2023-02-07T22:21:11
| 2019-03-27T15:00:34
|
Python
|
UTF-8
|
Python
| false
| false
| 2,735
|
py
|
"""Module for generic model operations mixin."""
import re

from .config import db
class ModelMixin:
    """Mixin class with generic model operations."""

    def save(self):
        """Save a model instance.

        Returns:
            The saved instance, for call chaining.
        """
        db.session.add(self)
        db.session.commit()
        return self

    def update_(self, **kwargs):
        """Updates a record.

        Args:
            kwargs (dict): Key-value pair of the attributes to update

        Returns:
            The updated record. (The original returned None although its
            docstring promised the record; returning self is backward
            compatible for callers that ignored the result.)
        """
        for field, value in kwargs.items():
            setattr(self, field, value)
        db.session.commit()
        return self

    @classmethod
    def get(cls, id):
        """
        Gets a record by id

        Args:
            id (int): Unique identifier for the record

        Returns:
            The found record, or None when it does not exist
        """
        return cls.query.get(id)

    @classmethod
    def get_or_404(cls, id):
        """
        Gets a record or raises a 404 error

        Args:
            id (int): Unique identifier for the record

        Returns:
            The found record

        Raises:
            ValidationError: if the record does not exist.
                NOTE(review): ValidationError is not imported in this module
                (the original raised NameError here instead of the intended
                404) -- confirm its home module and import it. `re`, also
                previously missing, is now imported at module level.
        """
        record = cls.get(id)
        if not record:
            # Splits the CamelCase class name into words for the message,
            # e.g. 'StockItem' -> 'Stock item not found'.
            raise ValidationError(
                {
                    'message':
                    f'{re.sub(r"(?<=[a-z])[A-Z]+", lambda x: f" {x.group(0).lower()}", cls.__name__)} not found'  # noqa
                },
                404)
        return record

    def delete(self):
        """
        Soft delete a model instance.

        Not implemented yet; intentionally a no-op.
        """
        pass

    @classmethod
    def count(cls):
        """
        Returns the number of records that satisfy a query
        """
        return cls.query.count()

    @classmethod
    def find_or_create(cls, data, **kwargs):
        """
        Finds a model instance or creates it

        Args:
            data (dict): details of the record to be created
            kwargs: filters used to look up an existing record

        Returns:
            The found record or the newly created record
        """
        instance = cls.query.filter_by(**kwargs).first()
        if not instance:
            instance = cls(**data).save()
        return instance

    @classmethod
    def bulk_create(cls, objects):
        """
        Saves a list of records (dict) to database

        Args:
            objects (list): List of records to be saved to database

        Returns:
            (list): A list of the newly created records
        """
        resource_list = [cls(**item) for item in objects]
        db.session.add_all(resource_list)
        db.session.commit()
        return resource_list
|
[
"agbeyeseun1@gmail.com"
] |
agbeyeseun1@gmail.com
|
aa0da87f9190e8296e72752194ba5b8957bb36fa
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/meetup/2b4a2462e86149f3a94264f7c35aef7a.py
|
ac0930b773da25cb6f1e91324fa9ea02ed62294a
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 614
|
py
|
from calendar import monthrange
from datetime import date
def meetup_day(year, month, day_of_the_week, which):
    """Return the datetime.date of the requested meetup day.

    Args:
        year, month: the month to search.
        day_of_the_week: weekday name as produced by strftime('%A'),
            e.g. 'Monday' (English in the default C locale).
        which: 'teenth', 'last', or an ordinal string like '1st'..'5th'.

    Fixes a NameError in the original: the comprehension iterated over
    `daye_` but filtered on the undefined name `date_`.
    """
    month_length = monthrange(year, month)[1]
    days_in_month = (date(year, month, day)
                     for day in range(1, month_length + 1))
    candidates = [date_
                  for date_ in days_in_month
                  if day_name(date_) == day_of_the_week]
    if which == 'teenth':
        # Exactly one day in 13..19 has each weekday.
        return next(d for d in candidates if 13 <= d.day <= 19)
    if which == 'last':
        return candidates[-1]
    # '1st' -> candidates[0], '2nd' -> candidates[1], ...
    return candidates[int(which[0]) - 1]


def day_name(date_):
    """Return the locale-dependent weekday name of *date_*."""
    return date_.strftime('%A')
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
5a3533fe380107f7a518cfd59cc2bc0bf7a77c6a
|
7556542c8c6ae157542300ce45388a8cb0213edb
|
/cocitation/co-citation-finding.py
|
7e0a03491b4cf421e14f206531faccb9b8550960
|
[
"Apache-2.0"
] |
permissive
|
hyyc116/Therapies_finding
|
2229f567c157d17a7ed947d62a78d3487151540c
|
1ee36190e5b85ac89d2836c67ab60c1168c3b1b0
|
refs/heads/master
| 2021-01-17T12:46:32.491077
| 2017-04-06T20:28:45
| 2017-04-06T20:28:45
| 84,074,102
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,042
|
py
|
#coding:utf-8
import sys
sys.path.append(".")
sys.path.append("..")
from tools.xml_parser import *
reload(sys)
sys.setdefaultencoding('utf-8')
import re
from collections import defaultdict
import json
#Get references
def parse_references_with_index(indexpath):
    """For every .nxml path listed in *indexpath*, print one line holding
    the article DOI, a tab, then all reference titles joined by '. '.

    Python 2 code: uses `print` statements and relies on helpers imported
    from tools.xml_parser (parse_doc, parse_pmc_references, parse_pmc_doi).
    Lines in the index that do not end with '.nxml' are skipped.
    """
    count =0
    for path in open(indexpath):
        count+=1
        if not path.strip().endswith('.nxml'):
            continue
        # Progress report every 100 index lines, written to stderr.
        if count%100==1:
            sys.stderr.write('{:}\n'.format(count))
        path = path.strip()
        doc = parse_doc(path)
        titles = []
        for title in parse_pmc_references(doc):
            titles.append(title)
        # Collapse runs of whitespace so each record stays on one line.
        headers = re.sub(r"\s+",' ','. '.join(titles)+".")
        doi = parse_pmc_doi(doc)
        print doi+"\t"+headers.encode('utf-8')
#get body text
def parse_indexes(indexpath,nplist):
    """Scan the .nxml files listed in *indexpath* for documents mentioning
    "parkinson's disease"; for each noun phrase in *nplist*, collect the
    paths of matching documents whose body text contains it.

    Side effects: writes the phrase->paths mapping as JSON to
    'parkinson-tf.dict' and prints one "<phrase>\\t<doc count>" line per
    phrase (Python 2 print statement).
    """
    count=0
    find_doc_count=0
    tf_dic=defaultdict(list)
    for path in open(indexpath):
        count+=1
        if not path.strip().endswith('.nxml'):
            continue
        # Progress report every 10 index lines.
        if count%10==1:
            sys.stderr.write('PROGRESS:{:},'.format(count))
            sys.stderr.write('find {:} docs.\n'.format(find_doc_count))
        path = path.strip()
        # Cheap pre-filter on the raw file before doing the XML parse.
        content = open(path).read().lower()
        if "parkinson's disease" not in content:
            continue
        find_doc_count+=1
        content = parse_body_abstext(path)
        # Normalise: hyphens to spaces, collapse whitespace, lowercase.
        content = re.sub(r'\s+'," ",content.replace('-'," ").lower())
        for np in nplist:
            if np in content:
                tf_dic[np].append(path)
    open("parkinson-tf.dict",'w').write(json.dumps(tf_dic))
    for np in tf_dic.keys():
        print np+"\t"+str(len(set(tf_dic[np])))
def parse_body_abstext(path):
    """Return the whitespace-normalised text of every body paragraph
    (<sec><p> nodes) of the article at *path*, joined by single spaces."""
    doc = parse_doc(path)
    paragraphs = []
    for node in doc.select('sec p'):
        paragraphs.append(re.sub(r'\s+', " ", node.get_text()))
    return " ".join(paragraphs)
def score_therapies(df_path,tf_path):
    """Rank therapies by the ratio df/tf and print them, best first.

    *df_path* lines are tab-separated with the therapy name in field 0 and
    a count in field 2; *tf_path* lines hold "<phrase>\\t<count>".  Therapy
    names are normalised (hyphens to spaces, whitespace collapsed).  A
    missing tf entry defaults to 0.5, which also avoids division by zero.
    Python 2 code (print statement).
    """
    df_dict=defaultdict(int)
    tf_dict = defaultdict(int)
    for line in open(df_path):
        splits = line.split("\t")
        therapy = re.sub(r'\s+'," ",splits[0].replace("-"," "))
        df_dict[therapy]=int(splits[2])
    for line in open(tf_path):
        splits = line.split("\t")
        tf_dict[splits[0]] = int(splits[1])
    results=defaultdict(float)
    for t in df_dict.keys():
        tf = tf_dict.get(t,0.5)
        results[t]=df_dict[t]/float(tf)
    # Highest score first; columns: name, score, df, tf.
    for k,v in sorted(results.items(),key=lambda x:x[1],reverse=True):
        print "{:}\t{:.5f}\t{:}\t{:}".format(k,v,df_dict[k],tf_dict.get(k,0.5))
if __name__=="__main__":
    # Dispatch on the sub-command given as the first CLI argument.
    clas = sys.argv[1]
    if clas=='ref':
        # BUG FIX: the index path is the second argument; the original
        # passed sys.argv[1] (the sub-command string 'ref') as the path.
        parse_references_with_index(sys.argv[2])
    elif clas=='tf':
        indexpath=sys.argv[2]
        dfpath=sys.argv[3]
        # Noun phrases: first tab field per line, hyphens to spaces,
        # whitespace collapsed (same normalisation as score_therapies).
        nplist = [re.sub(r'\s+'," ",line.strip().split('\t')[0].replace("-"," ")) for line in open(dfpath)]
        parse_indexes(indexpath,nplist)
    elif clas=='score':
        score_therapies(sys.argv[2],sys.argv[3])
|
[
"hyyc116@gmail.com"
] |
hyyc116@gmail.com
|
1487f463b36ac15949892d9d13ee5fa6dc48ad37
|
c573cac75d4e34263fa29d3efccb76199be0af98
|
/4/A.py
|
c3fa91a5b7637c8e587fb001b43db17bffc6807c
|
[] |
no_license
|
se2313se/Ya.algorithms_training
|
b197a0d1f786b0a250de9420965f48436b92ca6a
|
c52a0ca53f8a807abc943fa60b5b178754118141
|
refs/heads/main
| 2023-06-08T23:03:40.853383
| 2021-06-24T17:21:07
| 2021-06-24T17:21:07
| 380,001,410
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 303
|
py
|
with open('input.txt', 'r', encoding='utf8') as f:
    # Build a bidirectional synonym lookup: each input pair maps both ways.
    synonyms = dict()
    n = int(f.readline())  # number of word pairs that follow
    for i in range(n):
        tempWord, tempSynonyms = f.readline().split()
        synonyms[tempWord] = tempSynonyms
        synonyms[tempSynonyms] = tempWord
    # The final line holds the query word; print its counterpart.
    print(synonyms[f.readline().strip()])
|
[
"71695356+se2313se@users.noreply.github.com"
] |
71695356+se2313se@users.noreply.github.com
|
15ebe1a3991b7c2926af485aac68c164facd7718
|
adbf09a31415e6cf692ff349bd908ea25ded42a8
|
/widgets/hello.py
|
1f431dbfab5106918d3f455f654bdbbf17576618
|
[] |
no_license
|
cmulliss/gui_python
|
53a569f301cc82b58880c3c0b2b415fad1ecc3f8
|
6c83d8c2e834464b99024ffd8cf46ac4e734e7a4
|
refs/heads/main
| 2023-08-12T22:33:01.596005
| 2021-10-11T12:35:41
| 2021-10-11T12:35:41
| 408,176,101
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 359
|
py
|
import tkinter as tk
from tkinter import ttk
# Minimal Tk demo: one window containing a single padded label.
# The main window is conventionally called root.
# tk.Tk() creates the main window object.
root = tk.Tk()
root.title("hello World")
# The label's .pack() geometry-manages it into the window.
ttk.Label(root, text="Hello World", padding=(30, 10)).pack()
# Start the event loop; blocks until the window is closed.
root.mainloop()
|
[
"cmulliss@gmail.com"
] |
cmulliss@gmail.com
|
79060db8148d189e49d71a2fcde2a58110cad683
|
d4f05d51568bfda9fb964deba92d9fd599a3dcde
|
/desing_pattern/factory_method/idcard.py
|
d696179da0e2206fdb2814b3f87a9e6356415882
|
[] |
no_license
|
Fullmoon8507/PythonPracticeProject
|
44beba7ce783e5e22429516d39ee96adc1ead785
|
57454099ad67bfe4431ee997fada640fde6ccecc
|
refs/heads/master
| 2020-04-16T23:29:58.907552
| 2017-05-06T07:27:35
| 2017-05-06T07:27:35
| 53,178,978
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 311
|
py
|
from product import Product
class IDCard(Product):
    """A Product implementation representing a personal ID card."""

    def __init__(self, owner):
        """Announce the creation of *owner*'s card and remember the owner."""
        print(owner + 'のカードを作成します')
        self.__owner = owner

    def use(self):
        """Announce that the owner's card is being used."""
        print(self.get_owner() + 'のカードを使います')

    def get_owner(self):
        """Return the name of the person this card was issued to."""
        return self.__owner
|
[
"you@example.com"
] |
you@example.com
|
8a4d5bed883776ebcd3fcc904288d9add338fef0
|
584f7b51d7cd529448e2fc0147557e26931ab17e
|
/test_Begin_dtype.py
|
94c25b201a1b4bb74e965f1d89a9301ac63f4647
|
[
"BSD-3-Clause"
] |
permissive
|
opticspy/lightpipes
|
8ca0d2221a1b893de5e51fec9061e90b9145f5f8
|
f4ffdedb3ab2f9b5ae5a9a8e37985d2a7f8bb2ef
|
refs/heads/master
| 2023-09-04T19:07:11.376631
| 2023-09-04T15:24:55
| 2023-09-04T15:24:55
| 80,127,706
| 191
| 55
|
BSD-3-Clause
| 2023-08-23T00:45:33
| 2017-01-26T15:39:28
|
Python
|
UTF-8
|
Python
| false
| false
| 572
|
py
|
#! /usr/bin/env python
"""
Script to test the Begin command with dtype option.

Compares the memory footprint of the LightPipes simulation field when
Begin() is called with its default dtype versus an explicit
numpy.complex64, printing the field type and the size of F.field.
"""
from LightPipes import *
import numpy as np
import sys
# Simulation parameters: 500 nm wavelength on a 25 mm, 3000x3000 grid.
wavelength = 500*nm
size = 25*mm
N = 3000
N2=int(N/2)  # grid midpoint -- not used in this test
w0=2*mm  # beam waist -- not used in this test
print("LightPipes version = ", LPversion)
print("without dtype option:")
F=Begin(size,wavelength,N)
print("type of F:",F._dtype)
print("size of F.field: ",sys.getsizeof(F.field)/1e9," Gbyte")
print("\n")
print("with dtype option:")
F=Begin(size,wavelength,N,dtype=np.complex64)
print("type of F:",F._dtype)
print("size of F.field: ",sys.getsizeof(F.field)/1e9," Gbyte")
|
[
"fred511949@gmail.com"
] |
fred511949@gmail.com
|
dd7bda05324df1c30a70004bdcf169a29b9a972f
|
b76615ff745c6d66803506251c3d4109faf50802
|
/pyobjc-framework-SpriteKit/PyObjCTest/test_skview.py
|
96b626096078794678e9693ea10f2b0c41775b58
|
[
"MIT"
] |
permissive
|
danchr/pyobjc-git
|
6ef17e472f54251e283a0801ce29e9eff9c20ac0
|
62b787fddeb381184043c7ff136f1c480755ab69
|
refs/heads/master
| 2021-01-04T12:24:31.581750
| 2020-02-02T20:43:02
| 2020-02-02T20:43:02
| 240,537,392
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,319
|
py
|
import sys
from PyObjCTools.TestSupport import *
import objc
if sys.maxsize > 2 ** 32:
    # SpriteKit bindings are only exercised on 64-bit Python builds.
    import SpriteKit
    class TestSKViewHelper(SpriteKit.NSObject):
        # Helper object exposing the SKViewDelegate callback so its
        # bridged signature can be checked below.
        def view_shouldRenderAtTime_(self, v, t):
            return 1
    class TestSKView(TestCase):
        """Metadata tests: verify BOOL argument/result bridging for SKView."""
        @min_os_level("10.9")
        def testMethods10_9(self):
            self.assertArgIsBOOL(SpriteKit.SKView.setPaused_, 0)
            self.assertResultIsBOOL(SpriteKit.SKView.isPaused)
            self.assertArgIsBOOL(SpriteKit.SKView.setShowsFPS_, 0)
            self.assertResultIsBOOL(SpriteKit.SKView.showsFPS)
            self.assertArgIsBOOL(SpriteKit.SKView.setShowsDrawCount_, 0)
            self.assertResultIsBOOL(SpriteKit.SKView.showsDrawCount)
            self.assertArgIsBOOL(SpriteKit.SKView.setShowsNodeCount_, 0)
            self.assertResultIsBOOL(SpriteKit.SKView.showsNodeCount)
            self.assertArgIsBOOL(SpriteKit.SKView.setAsynchronous_, 0)
            self.assertResultIsBOOL(SpriteKit.SKView.isAsynchronous)
            self.assertArgIsBOOL(SpriteKit.SKView.setIgnoresSiblingOrder_, 0)
            self.assertResultIsBOOL(SpriteKit.SKView.ignoresSiblingOrder)
        @min_os_level("10.10")
        def testMethods10_10(self):
            self.assertArgIsBOOL(SpriteKit.SKView.setShowsFields_, 0)
            self.assertResultIsBOOL(SpriteKit.SKView.showsFields)
            self.assertArgIsBOOL(SpriteKit.SKView.setShowsPhysics_, 0)
            self.assertResultIsBOOL(SpriteKit.SKView.showsPhysics)
            self.assertArgIsBOOL(SpriteKit.SKView.setShowsQuadCount_, 0)
            self.assertResultIsBOOL(SpriteKit.SKView.showsQuadCount)
            self.assertArgIsBOOL(SpriteKit.SKView.setAllowsTransparency_, 0)
            self.assertResultIsBOOL(SpriteKit.SKView.allowsTransparency)
            self.assertArgIsBOOL(SpriteKit.SKView.setShouldCullNonVisibleNodes_, 0)
            self.assertResultIsBOOL(SpriteKit.SKView.shouldCullNonVisibleNodes)
        @min_sdk_level("10.12")
        def testProtocols(self):
            # Delegate protocol exists and the callback bridges
            # (BOOL return, double time argument).
            objc.protocolNamed("SKViewDelegate")
            self.assertResultIsBOOL(TestSKViewHelper.view_shouldRenderAtTime_)
            self.assertArgHasType(
                TestSKViewHelper.view_shouldRenderAtTime_, 1, objc._C_DBL
            )
if __name__ == "__main__":
    main()
|
[
"ronaldoussoren@mac.com"
] |
ronaldoussoren@mac.com
|
87df33662bfa4926caa32f3b3fb0907ed1ddbc37
|
32226e72c8cbaa734b2bdee081c2a2d4d0322702
|
/experiments/state_distance/optimal_control_with_q.py
|
e6785e1a4453bcf63958d1b547ffd1074ec35676
|
[
"MIT"
] |
permissive
|
Asap7772/rail-rl-franka-eval
|
2b1cbad7adae958b3b53930a837df8a31ab885dc
|
4bf99072376828193d05b53cf83c7e8f4efbd3ba
|
refs/heads/master
| 2022-11-15T07:08:33.416025
| 2020-07-12T22:05:32
| 2020-07-12T22:05:32
| 279,155,722
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,763
|
py
|
"""
Choose action according to
a = argmax_{a, s'} r(s, a, s') s.t. Q(s, a, s') = 0
where r is defined specifically for the reacher env.
"""
import argparse
import joblib
import numpy as np
from railrl.state_distance.policies import (
SoftOcOneStepRewardPolicy,
TerminalRewardSampleOCPolicy,
ArgmaxQFPolicy,
PseudoModelBasedPolicy,
SamplePolicyPartialOptimizer)
from railrl.samplers.util import rollout
from railrl.torch.pytorch_util import set_gpu_mode
from railrl.core import logger
def experiment(variant):
    """Unused placeholder; all work happens under __main__ below."""
    pass
if __name__ == "__main__":
    # Offline evaluation: load a snapshot containing an env and a
    # Q-function, wrap the Q-function in one of several optimal-control
    # policies (selected by CLI flags), then roll the policy out forever.
    parser = argparse.ArgumentParser()
    parser.add_argument('file', type=str,
                        help='path to the snapshot file')
    parser.add_argument('--H', type=int, default=500,
                        help='Max length of rollout')
    parser.add_argument('--num_rollouts', type=int, default=1,
                        help='Number of rollouts per eval')
    parser.add_argument('--gpu', action='store_true')
    parser.add_argument('--argmax', action='store_true')
    parser.add_argument('--hide', action='store_true')
    parser.add_argument('--verbose', action='store_true')
    parser.add_argument('--planh', type=int, default=1,
                        help='Planning horizon')
    parser.add_argument('--discount', type=float, help='Discount Factor')
    parser.add_argument('--weight', type=float, default=1.,
                        help='Constraint penalty weight')
    parser.add_argument('--nsamples', type=int, default=100,
                        help='Number of samples for optimization')
    parser.add_argument('--ngrad', type=int, default=0,
                        help='Number of gradient steps for respective policy.')
    parser.add_argument('--mb', action='store_true',
                        help='Use (pseudo-)model-based policy')
    parser.add_argument('--partial', action='store_true',
                        help='Use partial state optimizer')
    parser.add_argument('--grid', action='store_true',
                        help='Sample actions from a grid')
    parser.add_argument('--dt', help='decrement tau', action='store_true')
    parser.add_argument('--cycle', help='cycle tau', action='store_true')
    args = parser.parse_args()
    data = joblib.load(args.file)
    print("Done loading")
    env = data['env']
    qf = data['qf']
    if args.gpu:
        set_gpu_mode(True)
        # NOTE(review): `ptu` is not imported in this file -- this branch
        # would raise NameError; confirm the intended import.
        qf.to(ptu.device)
    qf.train(False)
    print("Env type:", type(env))
    # Policy selection: flags are checked in priority order; --planh only
    # matters when none of the earlier flags is set.
    if args.argmax:
        policy = ArgmaxQFPolicy(
            qf,
            env,
            sample_size=args.nsamples,
            num_gradient_steps=args.ngrad,
            sample_actions_from_grid=args.grid,
        )
    elif args.mb:
        policy = PseudoModelBasedPolicy(
            qf,
            env,
            sample_size=args.nsamples,
        )
    elif args.partial:
        policy = SamplePolicyPartialOptimizer(
            qf,
            env,
            data['policy'],
            sample_size=args.nsamples,
        )
    elif args.planh == 1:
        policy = SoftOcOneStepRewardPolicy(
            qf,
            env,
            data['policy'],
            constraint_weight=args.weight,
            sample_size=args.nsamples,
            verbose=args.verbose,
            sample_actions_from_grid=args.grid,
        )
    else:
        policy = TerminalRewardSampleOCPolicy(
            qf,
            env,
            horizon=args.planh,
            constraint_weight=args.weight,
            sample_size=args.nsamples,
            verbose=args.verbose,
        )
    discount = 0
    if args.discount is not None:
        print("WARNING: you are overriding the discount factor. Right now "
              "only discount = 0 really makes sense.")
        discount = args.discount
    init_tau = discount
    # Evaluate forever: each pass does num_rollouts rollouts, decrementing
    # tau after each one (clamped at 0, or restarted when --cycle is set).
    while True:
        paths = []
        tau = init_tau
        policy.set_tau(tau)
        for _ in range(args.num_rollouts):
            goal = env.sample_goal_for_rollout()
            if args.verbose:
                env.print_goal_state_info(goal)
            env.set_goal(goal)
            policy.set_goal(goal)
            path = rollout(
                env,
                policy,
                max_path_length=args.H,
                animated=not args.hide,
            )
            # Attach the goal to every step of the path for diagnostics.
            path['goal_states'] = np.repeat(
                np.expand_dims(goal, 0),
                len(path['observations']),
                0,
            )
            paths.append(path)
            tau -= 1
            if tau < 0:
                if args.cycle:
                    tau = init_tau
                else:
                    tau = 0
            policy.set_tau(tau)
        env.log_diagnostics(paths)
        logger.dump_tabular()
|
[
"asap7772@berkeley.edu"
] |
asap7772@berkeley.edu
|
7539f89d65e13d8d08aa52f5ad2cb95edad6e77c
|
572dd7f851ff2f6b39fea8f99199c22260f113df
|
/user/messages/success.py
|
b4e779b05fd4003a8e96f5153edf170b46c1ee00
|
[] |
no_license
|
SEUNAGBEYE/Flighty
|
f869f3fb1c1c74bddff9102b11a02411f502dc52
|
46247f93e7f9c83441c3f50eaca2f0d3eaeca96f
|
refs/heads/develop
| 2022-12-13T12:17:58.760670
| 2019-07-29T15:51:36
| 2019-07-29T15:51:36
| 165,585,170
| 0
| 0
| null | 2022-12-08T01:36:58
| 2019-01-14T02:52:46
|
Python
|
UTF-8
|
Python
| false
| false
| 172
|
py
|
# User-facing success messages returned by the API.
USER_CREATED = 'User successfully created'
# Fixed message typo ("sucessfully"); the constant's misspelled name
# ("SUCCESSFULL") is kept unchanged for backward compatibility.
LOGIN_SUCCESSFULL = 'User successfully logged in'
PROFILE_UPDATED = 'Profile updated'
USER_RETRIEVED = 'User successfully fetched'
|
[
"agbeyeseun1@gmail.com"
] |
agbeyeseun1@gmail.com
|
82332f085a0ce0530c27abb8493eb16799f8861a
|
44e8334e1b17fda7f60d9760f59868a9227e2ab0
|
/python-tf/tf2/tf2-10-0-mnist.py
|
1510bd8a4542793f25cbb4c7648fb41506d3382a
|
[] |
no_license
|
MysteriousSonOfGod/python-3
|
47c2aa69a84ba78876c74bc6f2e7e6f3093df1e2
|
a303a5284c40f3cb96a8082a1f5ed80773b66336
|
refs/heads/master
| 2023-02-16T18:21:46.153388
| 2021-01-13T10:55:14
| 2021-01-13T10:55:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,295
|
py
|
# Lab 7 Learning rate and Evaluation
# Loads MNIST, shows the first training image, normalises pixel values,
# then plots the first 25 training images with their labels.
import tensorflow as tf
import matplotlib as mpl
import matplotlib.pyplot as plt
import sys, os
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
import images.image
learning_rate = 0.001  # NOTE(review): unused in this script -- confirm
training_epochs = 15  # one full pass over the training data = 1 epoch
batch_size = 100  # samples processed per training step
# e.g. with 1000 samples and batch_size 100, one epoch takes 10 iterations
nb_classes = 10  # digits 0-9
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test_org) = mnist.load_data()
# Pixel values of the raw images are in the 0-255 range
plt.figure()
plt.imshow(x_train[0])
plt.colorbar()
plt.grid(False)
images.image.save_fig("tf2.10.0.mnist_train_images")
plt.show()
# normalizing data to [0, 1]
# NOTE(review): x_test_normal is never used below -- confirm intended
x_train, x_test_normal = x_train / 255.0, x_test / 255.0
# Show the first 25 training images with their class labels underneath
plt.figure(figsize=(10,10))
for i in range(25):
    plt.subplot(5,5,i+1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(x_train[i], cmap=plt.cm.binary)
    plt.xlabel(y_train[i])
images.image.save_fig("tf2.10.0.mnist_train_images1_25")
plt.show()
|
[
"cbaeck1@gmail.com"
] |
cbaeck1@gmail.com
|
d7d9397514f924e2e3c51219055782d39055529b
|
f82e67dd5f496d9e6d42b4fad4fb92b6bfb7bf3e
|
/scripts/client/gui/scaleform/daapi/view/lobby/lobbyview.py
|
ccca6333bda22faa53d118768576e781414e63cf
|
[] |
no_license
|
webiumsk/WOT0.10.0
|
4e4413ed4e7b00e22fb85d25fdae9400cbb4e76b
|
a84f536c73f86d9e8fab559e97f88f99f2ad7e95
|
refs/heads/master
| 2021-01-09T21:55:00.662437
| 2015-10-23T20:46:45
| 2015-10-23T20:46:45
| 44,835,654
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,690
|
py
|
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/LobbyView.py
import BigWorld
import VOIP
import constants
import CommandMapping
from PlayerEvents import g_playerEvents
from gui import game_control, SystemMessages
import gui
from gui.LobbyContext import g_lobbyContext
from gui.battle_control import g_sessionProvider
from gui.Scaleform.daapi.view.meta.LobbyPageMeta import LobbyPageMeta
from gui.Scaleform.framework.entities.View import View
from gui.Scaleform.genConsts.FORTIFICATION_ALIASES import FORTIFICATION_ALIASES
from gui.Scaleform.genConsts.PREBATTLE_ALIASES import PREBATTLE_ALIASES
from gui.Scaleform.locale.SYSTEM_MESSAGES import SYSTEM_MESSAGES
from gui.prb_control.dispatcher import g_prbLoader
from gui.shared.ItemsCache import g_itemsCache
from gui.shared.utils.HangarSpace import g_hangarSpace
from gui.shared import EVENT_BUS_SCOPE, events, event_dispatcher as shared_events
from gui.Scaleform.framework import ViewTypes
from gui.Scaleform.Waiting import Waiting
from gui.Scaleform.daapi.settings.views import VIEW_ALIAS
from gui.shared.utils.functions import getViewName
from helpers import i18n
class LobbyView(LobbyPageMeta):
    """Main lobby page: wires up lobby-wide event listeners, shows/hides
    the loading waiter while sub-views load, and reacts to IGR (internet
    game room) type changes and post-battle results.

    Listener registration in _populate and removal in _dispose are kept
    strictly mirrored (removal in reverse order) -- preserve that pairing
    when modifying either method.
    """
    # Sub-view aliases that display the 'loadPage' waiting spinner while
    # they are being loaded.
    VIEW_WAITING = (VIEW_ALIAS.LOBBY_HANGAR,
     VIEW_ALIAS.LOBBY_INVENTORY,
     VIEW_ALIAS.LOBBY_SHOP,
     VIEW_ALIAS.LOBBY_PROFILE,
     VIEW_ALIAS.LOBBY_BARRACKS,
     PREBATTLE_ALIASES.TRAINING_LIST_VIEW_PY,
     PREBATTLE_ALIASES.TRAINING_ROOM_VIEW_PY,
     VIEW_ALIAS.LOBBY_CUSTOMIZATION,
     VIEW_ALIAS.LOBBY_RESEARCH,
     VIEW_ALIAS.LOBBY_TECHTREE,
     FORTIFICATION_ALIASES.FORTIFICATIONS_VIEW_ALIAS,
     VIEW_ALIAS.BATTLE_QUEUE,
     VIEW_ALIAS.BATTLE_LOADING)
    class COMPONENTS:
        HEADER = 'lobbyHeader'
    def __init__(self, ctx = None):
        super(LobbyView, self).__init__(ctx)
        self.__currIgrType = constants.IGR_TYPE.NONE
    def getSubContainerType(self):
        return ViewTypes.LOBBY_SUB
    def _populate(self):
        # Register all listeners, show pending battle results, publish the
        # battle count and announce that the lobby finished loading.
        View._populate(self)
        self.__currIgrType = gui.game_control.g_instance.igr.getRoomType()
        g_prbLoader.setEnabled(True)
        self.addListener(events.LobbySimpleEvent.SHOW_HELPLAYOUT, self.__showHelpLayout, EVENT_BUS_SCOPE.LOBBY)
        self.addListener(events.LobbySimpleEvent.CLOSE_HELPLAYOUT, self.__closeHelpLayout, EVENT_BUS_SCOPE.LOBBY)
        self.addListener(events.GameEvent.SCREEN_SHOT_MADE, self.__handleScreenShotMade, EVENT_BUS_SCOPE.GLOBAL)
        g_playerEvents.onVehicleBecomeElite += self.__onVehicleBecomeElite
        self.app.loaderManager.onViewLoadInit += self.__onViewLoadInit
        self.app.loaderManager.onViewLoaded += self.__onViewLoaded
        self.app.loaderManager.onViewLoadError += self.__onViewLoadError
        game_control.g_instance.igr.onIgrTypeChanged += self.__onIgrTypeChanged
        self.__showBattleResults()
        battlesCount = g_itemsCache.items.getAccountDossier().getTotalStats().getBattlesCount()
        g_lobbyContext.updateBattlesCount(battlesCount)
        self.fireEvent(events.GUICommonEvent(events.GUICommonEvent.LOBBY_VIEW_LOADED))
        # Mute the microphone on lobby entry unless push-to-talk is held.
        keyCode = CommandMapping.g_instance.get('CMD_VOICECHAT_MUTE')
        if not BigWorld.isKeyDown(keyCode):
            VOIP.getVOIPManager().setMicMute(True)
    def _dispose(self):
        # Unregister everything added in _populate, in reverse order.
        game_control.g_instance.igr.onIgrTypeChanged -= self.__onIgrTypeChanged
        self.app.loaderManager.onViewLoadError -= self.__onViewLoadError
        self.app.loaderManager.onViewLoaded -= self.__onViewLoaded
        self.app.loaderManager.onViewLoadInit -= self.__onViewLoadInit
        g_playerEvents.onVehicleBecomeElite -= self.__onVehicleBecomeElite
        self.removeListener(events.LobbySimpleEvent.SHOW_HELPLAYOUT, self.__showHelpLayout, EVENT_BUS_SCOPE.LOBBY)
        self.removeListener(events.LobbySimpleEvent.CLOSE_HELPLAYOUT, self.__closeHelpLayout, EVENT_BUS_SCOPE.LOBBY)
        self.removeListener(events.GameEvent.SCREEN_SHOT_MADE, self.__handleScreenShotMade, EVENT_BUS_SCOPE.GLOBAL)
        View._dispose(self)
    def __showHelpLayout(self, _):
        self.as_showHelpLayoutS()
    def __closeHelpLayout(self, _):
        self.as_closeHelpLayoutS()
    def __handleScreenShotMade(self, event):
        if 'path' not in event.ctx:
            return
        SystemMessages.pushMessage(i18n.makeString('#menu:screenshot/save') % {'path': event.ctx['path']}, SystemMessages.SM_TYPE.Information)
    def __onVehicleBecomeElite(self, vehTypeCompDescr):
        # Pop the "vehicle became elite" window for this vehicle type.
        self.fireEvent(events.LoadViewEvent(VIEW_ALIAS.ELITE_WINDOW, getViewName(VIEW_ALIAS.ELITE_WINDOW, vehTypeCompDescr), {'vehTypeCompDescr': vehTypeCompDescr}), EVENT_BUS_SCOPE.LOBBY)
    def moveSpace(self, dx, dy, dz):
        # Forward mouse deltas to the 3D hangar space, if one is loaded.
        if g_hangarSpace.space:
            g_hangarSpace.space.handleMouseEvent(int(dx), int(dy), int(dz))
    def notifyCursorOver3dScene(self, isOver3dScene):
        self.fireEvent(events.LobbySimpleEvent(events.LobbySimpleEvent.NOTIFY_CURSOR_OVER_3DSCENE, ctx={'isOver3dScene': isOver3dScene}))
    def __onViewLoadInit(self, view):
        if view is not None and view.settings is not None:
            self.__subViewTransferStart(view.settings.alias)
        return
    def __onViewLoaded(self, view):
        if view is not None and view.settings is not None:
            self.__subViewTransferStop(view.settings.alias)
        return
    def __onViewLoadError(self, name, msg, item):
        if item is not None and item.pyEntity is not None:
            self.__subViewTransferStop(item.pyEntity.settings.alias)
        return
    def __onIgrTypeChanged(self, roomType, xpFactor):
        # Announce entering/leaving the premium IGR room with a system message.
        icon = gui.makeHtmlString('html_templates:igr/iconSmall', 'premium')
        if roomType == constants.IGR_TYPE.PREMIUM:
            SystemMessages.pushMessage(i18n.makeString(SYSTEM_MESSAGES.IGR_CUSTOMIZATION_BEGIN, igrIcon=icon), type=SystemMessages.SM_TYPE.Information)
        elif roomType in [constants.IGR_TYPE.BASE, constants.IGR_TYPE.NONE] and self.__currIgrType == constants.IGR_TYPE.PREMIUM:
            SystemMessages.pushMessage(i18n.makeString(SYSTEM_MESSAGES.IGR_CUSTOMIZATION_END, igrIcon=icon), type=SystemMessages.SM_TYPE.Information)
        self.__currIgrType = roomType
    def __subViewTransferStart(self, alias):
        if alias in self.VIEW_WAITING:
            Waiting.show('loadPage')
    def __subViewTransferStop(self, alias):
        if alias != VIEW_ALIAS.BATTLE_LOADING and alias in self.VIEW_WAITING:
            Waiting.hide('loadPage')
    def __showBattleResults(self):
        # Show the results window for the last finished battle, once.
        battleCtx = g_sessionProvider.getCtx()
        if battleCtx.lastArenaUniqueID:
            shared_events.showMyBattleResults(battleCtx.lastArenaUniqueID)
            battleCtx.lastArenaUniqueID = None
        return
|
[
"info@webium.sk"
] |
info@webium.sk
|
11dfb9beb211a5842f05475135524472e63b0052
|
9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb
|
/sdk/compute/azure-mgmt-avs/generated_samples/workload_networks_get.py
|
60db6d3b5326e38bb0efaea0f5d34f54b45f667d
|
[
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
openapi-env-test/azure-sdk-for-python
|
b334a2b65eeabcf9b7673879a621abb9be43b0f6
|
f61090e96094cfd4f43650be1a53425736bd8985
|
refs/heads/main
| 2023-08-30T14:22:14.300080
| 2023-06-08T02:53:04
| 2023-06-08T02:53:04
| 222,384,897
| 1
| 0
|
MIT
| 2023-09-08T08:38:48
| 2019-11-18T07:09:24
|
Python
|
UTF-8
|
Python
| false
| false
| 1,556
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.avs import AVSClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-avs
# USAGE
python workload_networks_get.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Fetch the default workload network of an AVS private cloud and print it.

    Credentials are taken from the AZURE_CLIENT_ID / AZURE_TENANT_ID /
    AZURE_CLIENT_SECRET environment variables via DefaultAzureCredential.
    """
    client = AVSClient(
        credential=DefaultAzureCredential(),
        # Placeholder subscription id -- replace with a real one to run.
        subscription_id="00000000-0000-0000-0000-000000000000",
    )
    response = client.workload_networks.get(
        resource_group_name="group1",
        private_cloud_name="cloud1",
        workload_network_name="default",
    )
    print(response)
# x-ms-original-file: specification/vmware/resource-manager/Microsoft.AVS/stable/2022-05-01/examples/WorkloadNetworks_Get.json
if __name__ == "__main__":
    main()
|
[
"noreply@github.com"
] |
openapi-env-test.noreply@github.com
|
aed27d9f42e5ddf4ac6f352e7d7d2b88f8f3a672
|
4eb3ff3e56043bc20162a59039af37533432feb1
|
/项目所用模块.py
|
1205da794e0b83ed65e541fe40c0fafae5ead37b
|
[] |
no_license
|
luofang0212/flask_test
|
99787a43ba117b0e5684f811ad9f83442c6e95cb
|
e9ea8644f7bbae94c0b689b79235913f73da7124
|
refs/heads/master
| 2023-07-26T00:49:53.681815
| 2021-09-06T16:15:11
| 2021-09-06T16:15:11
| 403,010,936
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 469
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# 本项目所用到的模块
''''''
'''
python 自带包
'''
'''
python 第三方库
'''
from flask import Flask
from flask import render_template
from flask import request
import jieba # 分词
from matplotlib import pyplot as plt #绘图,数据可视化
from PIL import Image #图片处理
import numpy as np #矩阵运算
import pymysql # mysql 数据库驱动
from wordcloud import WordCloud #词云
|
[
"warm_homel@163.com"
] |
warm_homel@163.com
|
654f21379131c530e86ac551da8784b4feab6062
|
7e2d802a17e42d50974af29e4c9b658d5da6471b
|
/HiredInTech/08-cover-the-border.py
|
e684983873eb1f32812cae3542721e131940be47
|
[] |
no_license
|
siddharthadtt1/Leet
|
a46290bacdf569f69d523413c0129676727cb20e
|
1d8b96257f94e16d0c1ccf8d8e8cd3cbd9bdabce
|
refs/heads/master
| 2020-06-20T16:21:15.915761
| 2017-05-15T22:35:42
| 2017-05-15T22:35:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,751
|
py
|
''' HiredInTech solution '''
def cover_the_border(l, radars):
    """Return the total border length covered by at least one radar interval.

    Sweep line over interval endpoints: walk them in coordinate order while
    tracking how many intervals are currently open; whenever the count drops
    back to zero, add the stretch since coverage last became non-empty.
    Note: `l` (the total border length) is accepted but never used.
    """
    # Example arguments:
    # l = 100
    # radars = [ [5, 10], [3, 25], [46, 99], [39, 40], [45, 50] ]
    if not radars:
        return 0
    START, END = 0, 1
    events = []
    for lo, hi in radars:
        events.append((lo, START))
        events.append((hi, END))
    events.sort(key=lambda ev: ev[0])
    active = 0       # intervals currently open
    opened_at = 0    # coordinate where coverage last became non-empty
    total = 0
    for coord, kind in events:
        if kind == START:
            active += 1
            if active == 1:
                opened_at = coord
        else:
            active -= 1
            if active == 0:
                total += coord - opened_at
    return total
''' My solution '''
def cover_the_border_my(l, radars):
    """Return the total covered length by merging overlapping radar intervals.

    Fixes over the original version:
      * no longer mutates the caller's `radars` -- the original sorted the
        list in place and then modified its inner lists through aliasing;
      * interval merging is done inline instead of via the `can_merge`
        helper, making the function self-contained.

    Args:
        l: total border length (accepted for API compatibility; unused).
        radars: list of [start, end] intervals.

    Returns:
        Total length covered by the union of the intervals (0 if empty).
    """
    # Example arguments:
    # l = 100
    # radars = [ [5, 10], [3, 25], [46, 99], [39, 40], [45, 50] ]
    if not radars:
        return 0
    # Work on sorted copies so the caller's data is left untouched.
    segs = sorted((list(seg) for seg in radars), key=lambda seg: seg[0])
    merged = [segs[0]]
    for lo, hi in segs[1:]:
        last = merged[-1]
        if lo <= last[1]:
            # Overlaps or touches the previous segment: extend it.
            last[1] = max(last[1], hi)
        else:
            merged.append([lo, hi])
    return sum(hi - lo for lo, hi in merged)
# Try to merge segments L1 and L2 (precondition: L1[0] <= L2[0]).
# On success the union is written into R and True is returned;
# otherwise R is left untouched and False is returned.
def can_merge(L1, L2, R):
    if L2[0] > L1[1]:
        # Gap between the segments: nothing to merge.
        return False
    R[0], R[1] = L1[0], max(L1[1], L2[1])
    return True
radars = [ [5, 10], [3, 25], [46, 99], [39, 40], [45, 50] ]
# Parenthesized call form: valid in both Python 2 and Python 3
# (the original bare `print x` statement is Python-2-only).
print(cover_the_border(100, radars))
|
[
"me@example.com"
] |
me@example.com
|
171eaf38d54a5fe7dcf2a23a97cf6c845c890e8d
|
cff5ac961d717059caf25dc4247ddcc958f27d24
|
/WRAPPERS/corrmat_from_regionalmeasures.py
|
1be8e37d8ef4eb4850aaa0156d84101d217d6c98
|
[
"MIT"
] |
permissive
|
repropaper/NSPN_WhitakerVertes_PNAS2016
|
fac4a9bb72e92db2d38b5c41e431e998c8114030
|
5c9c46caf91768d4cadec2b24078b640f05d3d76
|
refs/heads/reprobranch
| 2020-03-19T00:17:24.346727
| 2017-02-23T08:24:50
| 2017-02-23T08:24:50
| 135,469,739
| 0
| 1
|
MIT
| 2018-05-30T17:08:00
| 2018-05-30T16:26:32
|
OpenEdge ABL
|
UTF-8
|
Python
| false
| false
| 6,687
|
py
|
#!/usr/bin/env python
#=============================================================================
# Created by Kirstie Whitaker
# at Hot Numbers coffee shop on Trumpington Road in Cambridge, September 2016
# Contact: kw401@cam.ac.uk
#=============================================================================
#=============================================================================
# IMPORTS
#=============================================================================
import os
import sys
import argparse
import textwrap
import numpy as np
import pandas as pd
sys.path.append(os.path.join(os.path.dirname(__file__), '../SCRIPTS/'))
import make_corr_matrices as mcm
#=============================================================================
# FUNCTIONS
#=============================================================================
def setup_argparser():
    '''
    Code to read in arguments from the command line
    Also allows you to change some settings

    Returns:
        (arguments, parser): the parsed argparse Namespace and the parser
        itself (returned so callers can print usage/errors).
    '''
    # Build a basic parser.
    help_text = (('Generate a structural correlation matrix from an input csv file,\n')+
                 ('a list of region names and (optional) covariates.'))
    sign_off = 'Author: Kirstie Whitaker <kw401@cam.ac.uk>'
    # RawTextHelpFormatter keeps the manually wrapped help text intact.
    parser = argparse.ArgumentParser(description=help_text,
                                     epilog=sign_off,
                                     formatter_class=argparse.RawTextHelpFormatter)
    # Now add the arguments
    # Three required positionals, then two optional flags.
    parser.add_argument(dest='regional_measures_file',
                        type=str,
                        metavar='regional_measures_file',
                        help=textwrap.dedent(('CSV file that contains regional values for each participant.\n')+
                                             ('Column labels should be the region names or covariate variable\n')+
                                             ('names. All participants in the file will be included in the\n')+
                                             ('correlation matrix.')))
    parser.add_argument(dest='names_file',
                        type=str,
                        metavar='names_file',
                        help=textwrap.dedent(('Text file that contains the names of each region to be included\n')+
                                             ('in the correlation matrix. One region name on each line.')))
    parser.add_argument(dest='output_name',
                        type=str,
                        metavar='output_name',
                        help=textwrap.dedent(('File name of the output correlation matrix.\n')+
                                             ('If the output directory does not yet exist it will be created.')))
    parser.add_argument('--covars_file',
                        type=str,
                        metavar='covars_file',
                        help=textwrap.dedent(('Text file that contains the names of variables that should be\n')+
                                             ('covaried for each regional measure before the creation of the\n')+
                                             ('correlation matrix. One variable name on each line.\n')+
                                             ('  Default: None')),
                        default=None)
    parser.add_argument('--names_308_style',
                        action='store_true',
                        help=textwrap.dedent(('Include this flag if your names are in the NSPN 308\n')+
                                             ('parcellation style (which means you have 41 subcortical regions)\n')+
                                             ('that are still in the names files and that\n')+
                                             ('the names are in <hemi>_<DK-region>_<part> format.\n')+
                                             ('  Default: False')),
                        default=False)
    arguments = parser.parse_args()
    return arguments, parser
def read_in_data(regional_measures_file, names_file, covars_file=None, names_308_style=True):
    '''
    Read in the data from the three input files:
      * regional_measures_file : CSV with one row per participant
      * names_file             : one region name per line
      * covars_file            : one covariate name per line (optional)

    If the names are in 308 style, drop the first 41 (subcortical) entries
    from the names list and strip a trailing "_thickness" suffix from the
    data frame's column labels.

    Returns
    -------
    df, names, covars_list
    '''
    # Load the input files
    df = pd.read_csv(regional_measures_file)
    # Context managers close the handles promptly — the original
    # `open(...)` calls leaked open file objects.
    with open(names_file) as f:
        names = [line.strip() for line in f]
    if covars_file:
        with open(covars_file) as f:
            covars_list = [line.strip() for line in f]
    else:
        covars_list = []
    if names_308_style:
        # Strip the 41 subcortical names that lead the 308 parcellation...
        names = names[41:]
        # ...and drop the "_thickness" suffix from the data frame columns.
        df.columns = [col.rsplit('_thickness', 1)[0] for col in df.columns]
    return df, names, covars_list
def corrmat_from_regionalmeasures(regional_measures_file,
                                  names_file,
                                  covars_file,
                                  output_name,
                                  names_308_style=False):
    '''
    Build and save a structural correlation matrix.

    Reads the participant-level regional measures, the region names and
    the (optional) covariates, correlates the regional values across
    participants while correcting for the covariates, and writes the
    resulting matrix out to output_name as a text file.
    '''
    # Load everything we need from disk
    data_frame, region_names, covariates = read_in_data(
        regional_measures_file,
        names_file,
        covars_file=covars_file,
        names_308_style=names_308_style)
    # Correlate the regional measures, correcting for all covariates...
    corr_mat = mcm.create_corrmat(data_frame, region_names, covariates)
    # ...and write the matrix out
    mcm.save_mat(corr_mat, output_name)
if __name__ == "__main__":
    # Script entry point: parse the command line, then build and save the
    # correlation matrix.
    # Read in the command line arguments
    arg, parser = setup_argparser()
    # Now run the main function :)
    corrmat_from_regionalmeasures(arg.regional_measures_file,
                                  arg.names_file,
                                  arg.covars_file,
                                  arg.output_name,
                                  names_308_style=arg.names_308_style)
#=============================================================================
# Wooo! All done :)
#=============================================================================
|
[
"kw401@cam.ac.uk"
] |
kw401@cam.ac.uk
|
a0264de9f564b1eddb8d60d387ccf898539bcc2f
|
c544a5c24b4adedd2c1602894acf5dcafe64ed6f
|
/astropy_helpers/tests/test_utils.py
|
ad76e4f5b54a24cc076aaa835620bb1827d6aac3
|
[] |
permissive
|
astropy/astropy-helpers
|
b6053f673f517e11ccf243d1ffe1e685b9b8ebe7
|
3b45ed3191ceb45c574db304ec0f33282d2e4a98
|
refs/heads/master
| 2023-08-20T04:51:42.767065
| 2022-05-25T16:38:43
| 2022-05-25T16:38:43
| 14,448,779
| 30
| 40
|
BSD-3-Clause
| 2022-05-25T16:36:17
| 2013-11-16T15:01:42
|
Python
|
UTF-8
|
Python
| false
| false
| 751
|
py
|
import os
from ..utils import find_data_files
def test_find_data_files(tmpdir):
    # Build a small tree — data/, data/sub1/, data/sub2/, data/sub1/sub3/ —
    # each directory containing one 'data.dat' file.
    data = tmpdir.mkdir('data')
    sub1 = data.mkdir('sub1')
    sub2 = data.mkdir('sub2')
    sub3 = sub1.mkdir('sub3')

    for folder in (data, sub1, sub2, sub3):
        with open(folder.join('data.dat').strpath, 'w') as handle:
            handle.write('test')

    # The recursive glob should find all four files.
    found = find_data_files(data.strpath, '**/*.dat')
    found = sorted(os.path.relpath(path, data.strpath) for path in found)

    assert found[0] == os.path.join('data.dat')
    assert found[1] == os.path.join('sub1', 'data.dat')
    assert found[2] == os.path.join('sub1', 'sub3', 'data.dat')
    assert found[3] == os.path.join('sub2', 'data.dat')
|
[
"thomas.robitaille@gmail.com"
] |
thomas.robitaille@gmail.com
|
228b3233ec8da4230a696814daf44cbb9a316673
|
6657a43ee360177e578f67cf966e6aef5debda3c
|
/varsom_avalanche_client/configuration.py
|
4f8c074c8e547f5533c6988b72af1a1db6d5d8c1
|
[
"MIT"
] |
permissive
|
NVE/python-varsom-avalanche-client
|
3cc8b9c366f566a99c6f309ccdfb477f73256659
|
c7787bf070d8ea91efd3a2a9e7782eedd4961528
|
refs/heads/master
| 2022-04-20T09:32:24.499284
| 2020-04-16T20:12:01
| 2020-04-16T20:12:01
| 256,318,660
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,046
|
py
|
# coding: utf-8
"""
Snøskredvarsel API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v5.0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import copy
import logging
import multiprocessing
import sys
import urllib3
import six
from six.moves import http_client as httplib
class TypeWithDefault(type):
    """Metaclass that makes 'calling the class' return a shallow copy of a
    shared default instance, so global configuration can be swapped with
    set_default() while each caller still gets its own copy."""
    def __init__(cls, name, bases, dct):
        super(TypeWithDefault, cls).__init__(name, bases, dct)
        # Lazily-created template instance shared by all constructor calls.
        cls._default = None
    def __call__(cls):
        # First call builds the template; every call returns a shallow copy
        # of it (mutating a copy does not affect the template).
        if cls._default is None:
            cls._default = type.__call__(cls)
        return copy.copy(cls._default)
    def set_default(cls, default):
        # Replace the template with a shallow copy of `default`.
        cls._default = copy.copy(default)
class Configuration(six.with_metaclass(TypeWithDefault, object)):
    """NOTE: This class is auto generated by the swagger code generator program.
    Ref: https://github.com/swagger-api/swagger-codegen
    Do not edit the class manually.
    """
    def __init__(self):
        """Constructor"""
        # Default Base url
        self.host = "https://api01.nve.no/hydrology/forecast/avalanche/v5.0.1"
        # Temp file folder for downloading files
        self.temp_folder_path = None
        # Authentication Settings
        # dict to store API key(s)
        self.api_key = {}
        # dict to store API prefix (e.g. Bearer)
        self.api_key_prefix = {}
        # function to refresh API key if expired
        self.refresh_api_key_hook = None
        # Username for HTTP basic authentication
        self.username = ""
        # Password for HTTP basic authentication
        self.password = ""
        # Logging Settings
        self.logger = {}
        self.logger["package_logger"] = logging.getLogger("varsom_avalanche_client")
        self.logger["urllib3_logger"] = logging.getLogger("urllib3")
        # Log format
        # NOTE: this assignment runs the logger_format property setter, which
        # also builds self.logger_formatter — it must happen BEFORE the
        # logger_file assignment below, whose setter uses the formatter.
        self.logger_format = '%(asctime)s %(levelname)s %(message)s'
        # Log stream handler
        self.logger_stream_handler = None
        # Log file handler
        self.logger_file_handler = None
        # Debug file location
        # Property setter: None installs a StreamHandler on both loggers.
        self.logger_file = None
        # Debug switch
        # Property setter: False sets both loggers to WARNING level.
        self.debug = False
        # SSL/TLS verification
        # Set this to false to skip verifying SSL certificate when calling API
        # from https server.
        self.verify_ssl = True
        # Set this to customize the certificate file to verify the peer.
        self.ssl_ca_cert = None
        # client certificate file
        self.cert_file = None
        # client key file
        self.key_file = None
        # Set this to True/False to enable/disable SSL hostname verification.
        self.assert_hostname = None
        # urllib3 connection pool's maximum number of connections saved
        # per pool. urllib3 uses 1 connection as default value, but this is
        # not the best value when you are making a lot of possibly parallel
        # requests to the same host, which is often the case here.
        # cpu_count * 5 is used as default value to increase performance.
        self.connection_pool_maxsize = multiprocessing.cpu_count() * 5
        # Proxy URL
        self.proxy = None
        # Safe chars for path_param
        self.safe_chars_for_path_param = ''
    @property
    def logger_file(self):
        """The logger file.
        If the logger_file is None, then add stream handler and remove file
        handler. Otherwise, add file handler and remove stream handler.
        :param value: The logger_file path.
        :type: str
        """
        return self.__logger_file
    @logger_file.setter
    def logger_file(self, value):
        """The logger file.
        If the logger_file is None, then add stream handler and remove file
        handler. Otherwise, add file handler and remove stream handler.
        :param value: The logger_file path.
        :type: str
        """
        self.__logger_file = value
        if self.__logger_file:
            # If set logging file,
            # then add file handler and remove stream handler.
            self.logger_file_handler = logging.FileHandler(self.__logger_file)
            self.logger_file_handler.setFormatter(self.logger_formatter)
            for _, logger in six.iteritems(self.logger):
                logger.addHandler(self.logger_file_handler)
                if self.logger_stream_handler:
                    logger.removeHandler(self.logger_stream_handler)
        else:
            # If not set logging file,
            # then add stream handler and remove file handler.
            self.logger_stream_handler = logging.StreamHandler()
            self.logger_stream_handler.setFormatter(self.logger_formatter)
            for _, logger in six.iteritems(self.logger):
                logger.addHandler(self.logger_stream_handler)
                if self.logger_file_handler:
                    logger.removeHandler(self.logger_file_handler)
    @property
    def debug(self):
        """Debug status
        :param value: The debug status, True or False.
        :type: bool
        """
        return self.__debug
    @debug.setter
    def debug(self, value):
        """Debug status
        :param value: The debug status, True or False.
        :type: bool
        """
        self.__debug = value
        if self.__debug:
            # if debug status is True, turn on debug logging
            for _, logger in six.iteritems(self.logger):
                logger.setLevel(logging.DEBUG)
            # turn on httplib debug
            httplib.HTTPConnection.debuglevel = 1
        else:
            # if debug status is False, turn off debug logging,
            # setting log level to default `logging.WARNING`
            for _, logger in six.iteritems(self.logger):
                logger.setLevel(logging.WARNING)
            # turn off httplib debug
            httplib.HTTPConnection.debuglevel = 0
    @property
    def logger_format(self):
        """The logger format.
        The logger_formatter will be updated when sets logger_format.
        :param value: The format string.
        :type: str
        """
        return self.__logger_format
    @logger_format.setter
    def logger_format(self, value):
        """The logger format.
        The logger_formatter will be updated when sets logger_format.
        :param value: The format string.
        :type: str
        """
        self.__logger_format = value
        self.logger_formatter = logging.Formatter(self.__logger_format)
    def get_api_key_with_prefix(self, identifier):
        """Gets API key (with prefix if set).
        :param identifier: The identifier of apiKey.
        :return: The token for api key authentication.
        """
        # Give the refresh hook a chance to renew an expired key first.
        if self.refresh_api_key_hook:
            self.refresh_api_key_hook(self)
        key = self.api_key.get(identifier)
        if key:
            prefix = self.api_key_prefix.get(identifier)
            if prefix:
                return "%s %s" % (prefix, key)
            else:
                return key
        # Implicitly returns None when no key is configured for `identifier`.
    def get_basic_auth_token(self):
        """Gets HTTP basic authentication header (string).
        :return: The token for basic HTTP authentication.
        """
        return urllib3.util.make_headers(
            basic_auth=self.username + ':' + self.password
        ).get('authorization')
    def auth_settings(self):
        """Gets Auth Settings dict for api client.
        :return: The Auth Settings information dict.
        """
        # This API defines no authentication schemes, hence the empty dict.
        return {
        }
    def to_debug_report(self):
        """Gets the essential information for debugging.
        :return: The report for debugging.
        """
        return "Python SDK Debug Report:\n"\
               "OS: {env}\n"\
               "Python Version: {pyversion}\n"\
               "Version of the API: v5.0.1\n"\
               "SDK Package Version: 1.0.0".\
               format(env=sys.platform, pyversion=sys.version)
|
[
"jorgen.kvalberg@gmail.com"
] |
jorgen.kvalberg@gmail.com
|
7d5314a98029672f01fe722b58e29b81bd0a8f69
|
9dc5c9dd8bff75a17eb27c75a85f85f1515efbe1
|
/examples/competitive/sofm_compare_grid_types.py
|
4f33caf37872c3184f5a4581bd45137fb39d3099
|
[
"MIT"
] |
permissive
|
BickyMz/neupy
|
667a688a3f1f3c9c515376eb2fc32446185230a9
|
3ceb25d3b9f6c00c0b25ef65a25434126006098d
|
refs/heads/master
| 2020-05-24T16:25:03.803826
| 2019-05-05T12:55:46
| 2019-05-05T12:55:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,443
|
py
|
import matplotlib.pyplot as plt
from neupy import algorithms, utils
from helpers import plot_2d_grid, make_circle
plt.style.use('ggplot')
utils.reproducible()
if __name__ == '__main__':
    # Compare SOFM training on a hexagonal vs. a rectangular neuron grid,
    # plotted side by side over the same circle-shaped dataset.
    GRID_WIDTH = 10
    GRID_HEIGHT = 10
    # One entry per subplot; only the grid type differs between the runs.
    # NOTE(review): "regcangular" in the second title looks like a typo for
    # "rectangular" — left unchanged because it is a user-visible string.
    configurations = [{
        'grid_type': 'hexagon',
        'use_hexagon_grid': True,
        'title': 'Using hexagon grid',
    }, {
        'grid_type': 'rect',
        'use_hexagon_grid': False,
        'title': 'Using regcangular grid',
    }]
    data = make_circle()
    red, blue = ('#E24A33', '#348ABD')
    n_columns = len(configurations)
    plt.figure(figsize=(12, 5))
    for index, conf in enumerate(configurations, start=1):
        # A fresh SOFM per configuration so the two runs are independent.
        sofm = algorithms.SOFM(
            n_inputs=2,
            features_grid=(GRID_HEIGHT, GRID_WIDTH),
            verbose=True,
            shuffle_data=True,
            grid_type=conf['grid_type'],
            learning_radius=8,
            reduce_radius_after=5,
            std=2,
            reduce_std_after=5,
            step=0.3,
            reduce_step_after=5,
        )
        sofm.train(data, epochs=40)
        plt.subplot(1, n_columns, index)
        plt.title(conf['title'])
        # Raw samples (faint blue) with the trained neuron weights on top.
        plt.scatter(*data.T, color=blue, alpha=0.05)
        plt.scatter(*sofm.weight, color=red)
        # Reshape the flat weights to (xy, rows, cols) for the grid overlay.
        weights = sofm.weight.reshape((2, GRID_HEIGHT, GRID_WIDTH))
        plot_2d_grid(weights, color=red, hexagon=conf['use_hexagon_grid'])
    plt.show()
|
[
"mail@itdxer.com"
] |
mail@itdxer.com
|
277892508b145e197f2e8e451e059b45ae7d1432
|
0b64e696083d567ed18e6366d8bd8e99733e1485
|
/node_modules/socket.io/node_modules/redis/node_modules/hiredis/build/c4che/Release.cache.py
|
760ac556fb84b9aa1a219e95c3e78caf69dbbc15
|
[
"MIT"
] |
permissive
|
iambibhas/myn3
|
340286d56edcde4ad024b63f0b12e1ecb7c6b15f
|
994c2850ac76920289004dc67f46bcedf7e652dc
|
refs/heads/master
| 2021-01-01T05:53:35.376823
| 2012-09-24T20:20:16
| 2012-09-24T20:20:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,449
|
py
|
# NOTE(review): this appears to be an auto-generated waf configure cache
# (Release variant) for building the hiredis node addon: plain Python
# assignments recording the compiler/linker settings discovered at
# configure time. Regenerate by re-running configure rather than editing.
AR = '/usr/bin/ar'
ARFLAGS = 'rcs'
CCFLAGS = ['-g']
CCFLAGS_MACBUNDLE = ['-fPIC']
CCFLAGS_NODE = ['-D_LARGEFILE_SOURCE', '-D_FILE_OFFSET_BITS=64']
CC_VERSION = ('4', '6', '3')
COMPILER_CXX = 'g++'
CPP = '/usr/bin/cpp'
CPPFLAGS_NODE = ['-D_GNU_SOURCE']
CPPPATH_NODE = '/usr/include/nodejs'
CPPPATH_ST = '-I%s'
CXX = ['/usr/bin/g++']
CXXDEFINES_ST = '-D%s'
CXXFLAGS = ['-g', '-Wall', '-O3']
CXXFLAGS_DEBUG = ['-g']
CXXFLAGS_NODE = ['-D_LARGEFILE_SOURCE', '-D_FILE_OFFSET_BITS=64']
CXXFLAGS_RELEASE = ['-O2']
CXXLNK_SRC_F = ''
CXXLNK_TGT_F = ['-o', '']
CXX_NAME = 'gcc'
CXX_SRC_F = ''
CXX_TGT_F = ['-c', '-o', '']
DEST_BINFMT = 'elf'
DEST_CPU = 'x86_64'
DEST_OS = 'linux'
FULLSTATIC_MARKER = '-static'
LIBDIR = '/home/bibhas/.node_libraries'
LIBPATH_HIREDIS = '../deps/hiredis'
LIBPATH_NODE = '/usr/lib'
LIBPATH_ST = '-L%s'
LIB_HIREDIS = 'hiredis'
LIB_ST = '-l%s'
LINKFLAGS_MACBUNDLE = ['-bundle', '-undefined', 'dynamic_lookup']
LINK_CXX = ['/usr/bin/g++']
NODE_PATH = '/home/bibhas/.node_libraries'
PREFIX = '/usr/local'
PREFIX_NODE = '/usr'
RANLIB = '/usr/bin/ranlib'
RPATH_ST = '-Wl,-rpath,%s'
SHLIB_MARKER = '-Wl,-Bdynamic'
SONAME_ST = '-Wl,-h,%s'
STATICLIBPATH_ST = '-L%s'
STATICLIB_MARKER = '-Wl,-Bstatic'
STATICLIB_ST = '-l%s'
macbundle_PATTERN = '%s.bundle'
program_PATTERN = '%s'
shlib_CXXFLAGS = ['-fPIC', '-DPIC']
shlib_LINKFLAGS = ['-shared']
shlib_PATTERN = 'lib%s.so'
staticlib_LINKFLAGS = ['-Wl,-Bstatic']
staticlib_PATTERN = 'lib%s.a'
|
[
"iambibhas@gmail.com"
] |
iambibhas@gmail.com
|
47c4dfc1e15fb0f15d11f9e64213a4ad1ec7b299
|
f4f19a0b856ba36100f67272b05dad90c76b7457
|
/pre_processing/pre_process.py
|
d798aafcbd20c54aaac01bdeef281b7092d8d104
|
[] |
no_license
|
JamesBrace/kaggle
|
b9d8130aa1b5d17a2d89f3fa64b1142eb7167f7e
|
2069a5a3afa236bf57b25526439b5d5950e4b136
|
refs/heads/master
| 2021-04-26T22:28:18.676810
| 2018-03-06T18:25:43
| 2018-03-06T18:25:43
| 124,097,839
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,993
|
py
|
import numpy as np
from skimage.feature import canny
from scipy import ndimage as ndi
import matplotlib.pyplot as plt
from skimage import morphology
from skimage import util
class PreProcessedData:
    """Load the training CSVs and run an edge-detect / fill / clean image
    pipeline, showing example plots at each stage (each plt.show() blocks
    until the window is closed)."""
    def __init__(self):
        # Raw data and per-stage buffers; earlier stages are emptied as the
        # pipeline advances, presumably to bound memory use.
        self.x = []
        self.y = []
        self.cannied_images = []
        self.filled_images = []
        self.cleaned_images = []
        self.processed_images = []
        print("Getting and reshaping data")
        self.get_and_reshape_data()
        print("Pre-processing data")
        self.pre_process()
    def get_and_reshape_data(self):
        # Load the flat CSVs and reshape to (n, 64, 64) images / (n, 1) labels.
        self.x = np.loadtxt("../data/train_x.csv", delimiter=",") # load from text
        self.y = np.loadtxt("../data/train_y.csv", delimiter=",")
        self.x = self.x.reshape(-1, 64, 64) # reshape
        self.y = self.y.reshape(-1, 1)
        # Show the first image as a sanity check.
        plt.imshow(self.x[0], cmap='gray')
        plt.show()
    def pre_process(self):
        # Run the three pipeline stages in order.
        print("Canny-ing images")
        self.canny_images()
        print("Filling images")
        self.fill_images()
        print("Cleaning images")
        self.clean_images()
    def canny_images(self):
        print(self.x[0])
        print("Inverting images")
        # Invert, then edge-detect each inverted image.
        inverted_images = list(map(util.invert, self.x))
        plt.imshow(inverted_images[0], cmap='gray')
        plt.show()
        print("Cannying inverted images")
        self.cannied_images = list(map(canny, inverted_images))
        # Done for test purposes
        self.display_canny_image_example()
    def display_canny_image_example(self):
        plt.imshow(self.cannied_images[0], cmap='gray')
        plt.show()
        fig, ax = plt.subplots(figsize=(64, 64))
        ax.imshow(self.cannied_images[0], cmap=plt.cm.gray, interpolation='nearest')
        ax.set_title('Canny detector')
        ax.axis('off')
        # NOTE(review): 'box-forced' was removed in matplotlib 3.x; this line
        # raises on modern matplotlib — confirm the pinned version.
        ax.set_adjustable('box-forced')
        plt.show()
    def fill_images(self):
        # Fill enclosed regions of each edge map.
        self.filled_images = list(map(ndi.binary_fill_holes, self.cannied_images))
        self.cannied_images = []
        # Done for test purposes
        self.display_filled_image_example()
    def display_filled_image_example(self):
        plt.imshow(self.filled_images[0], cmap='gray')
        plt.show()
        fig, ax = plt.subplots(figsize=(64, 64))
        ax.imshow(self.filled_images[0], cmap=plt.cm.gray, interpolation='nearest')
        ax.set_title('filling the holes')
        ax.axis('off')
        plt.show()
    def clean_images(self):
        # NOTE(review): remove_small_objects expects an ndarray (boolean or
        # labeled); here it receives a Python list of arrays — confirm this
        # actually runs against the installed skimage version.
        self.cleaned_images = morphology.remove_small_objects(self.filled_images, 21)
        self.filled_images = []
        self.display_clean_image_example()
    def display_clean_image_example(self):
        plt.imshow(self.cleaned_images[0], cmap='gray')
        plt.show()
        fig, ax = plt.subplots(figsize=(64, 64))
        ax.imshow(self.cleaned_images[0], cmap=plt.cm.gray, interpolation='nearest')
        ax.set_title('filling the holes')
        ax.axis('off')
        plt.show()
data = PreProcessedData()
|
[
"james.brace@mail.mcgill.ca"
] |
james.brace@mail.mcgill.ca
|
09360acd5784b7d43b7da742def5c650aacf37dc
|
38a972a3cd1fc303b5f877e24d65118912d85d1c
|
/path/to/virtualenv/project/Lib/site-packages/tensorflow/python/ops/accumulate_n_benchmark.py
|
9fb5b537c24e331fe9de5436d162df1024bcb89b
|
[] |
no_license
|
ZulfikarAkbar/YOLO_ObjectDetection
|
0c1015aa987d03329eae48a2053a07dda05d96c0
|
3517d0592a269f79df9afd82e0b1b0123bbe0473
|
refs/heads/master
| 2022-10-27T05:08:26.734173
| 2019-02-07T17:35:22
| 2019-02-07T17:35:22
| 169,613,306
| 0
| 1
| null | 2022-10-18T02:18:17
| 2019-02-07T17:35:03
|
Python
|
UTF-8
|
Python
| false
| false
| 5,606
|
py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for accumulate_n() in math_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import time
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gen_control_flow_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import test
class AccumulateNBenchmark(test.Benchmark):
  """Benchmarks strategies for summing N tensors: math_ops.add_n versus
  three accumulate_n-style variants that sum into a temporary variable
  with assign_add, differing only in how the accumulator is initialized."""
  def _AccumulateNTemplate(self, inputs, init, shape, validate_shape):
    # Sum `inputs` into a temporary variable seeded with `init`; once all
    # assign_add updates have run, destroy the variable and return its value.
    var = gen_state_ops.temporary_variable(
        shape=shape, dtype=inputs[0].dtype.base_dtype)
    ref = state_ops.assign(var, init, validate_shape=validate_shape)
    update_ops = [
        state_ops.assign_add(
            ref, tensor, use_locking=True).op for tensor in inputs
    ]
    with ops.control_dependencies(update_ops):
      return gen_state_ops.destroy_temporary_variable(ref, var_name=var.op.name)
  def _AccumulateNInitializedWithFirst(self, inputs):
    # Accumulator seeded with zeros_like(first input); static shape known.
    return self._AccumulateNTemplate(
        inputs,
        init=array_ops.zeros_like(inputs[0]),
        shape=inputs[0].get_shape(),
        validate_shape=True)
  def _AccumulateNInitializedWithMerge(self, inputs):
    # Accumulator seeded via a Merge of the inputs; shape left unvalidated.
    return self._AccumulateNTemplate(
        inputs,
        init=array_ops.zeros_like(gen_control_flow_ops.merge(inputs)[0]),
        shape=tensor_shape.vector(0),
        validate_shape=False)
  def _AccumulateNInitializedWithShape(self, inputs):
    # Accumulator seeded with zeros built from the static shape and dtype.
    return self._AccumulateNTemplate(
        inputs,
        init=array_ops.zeros(
            shape=inputs[0].get_shape(), dtype=inputs[0].dtype.base_dtype),
        shape=inputs[0].get_shape(),
        validate_shape=True)
  def _GenerateUnorderedInputs(self, size, n):
    # n independent random vectors, shuffled so graph order is arbitrary.
    inputs = [random_ops.random_uniform(shape=[size]) for _ in xrange(n)]
    random.shuffle(inputs)
    return inputs
  def _GenerateReplicatedInputs(self, size, n):
    # The same random tensor repeated n times.
    return n * self._GenerateUnorderedInputs(size, 1)
  def _GenerateOrderedInputs(self, size, n):
    # Chain inputs through a capacity-1 FIFO queue so each tensor depends
    # on the previous one, forcing them to become ready in order.
    inputs = self._GenerateUnorderedInputs(size, 1)
    queue = data_flow_ops.FIFOQueue(
        capacity=1, dtypes=[inputs[0].dtype], shapes=[inputs[0].get_shape()])
    for _ in xrange(n - 1):
      op = queue.enqueue(inputs[-1])
      with ops.control_dependencies([op]):
        inputs.append(math_ops.tanh(1.0 + queue.dequeue()))
    return inputs
  def _GenerateReversedInputs(self, size, n):
    # Ordered inputs, reversed so the last-ready tensor comes first.
    inputs = self._GenerateOrderedInputs(size, n)
    inputs.reverse()
    return inputs
  def _SetupAndRunBenchmark(self, graph, inputs, repeats, format_args):
    # Build all four summation variants in `graph`, warm each op up with
    # 100 untimed runs, then time `repeats` runs and print one table row
    # per variant.
    with graph.as_default():
      add_n = math_ops.add_n(inputs)
      acc_n_first = self._AccumulateNInitializedWithFirst(inputs)
      acc_n_merge = self._AccumulateNInitializedWithMerge(inputs)
      acc_n_shape = self._AccumulateNInitializedWithShape(inputs)
      test_ops = (("AddN", add_n.op),
                  ("AccNFirst", acc_n_first.op),
                  ("AccNMerge", acc_n_merge.op),
                  ("AccNShape", acc_n_shape.op))
    with session.Session(graph=graph):
      for tag, op in test_ops:
        for _ in xrange(100):
          op.run()  # Run for warm up.
        start = time.time()
        for _ in xrange(repeats):
          op.run()
        duration = time.time() - start
        args = format_args + (tag, duration)
        print(self._template.format(*args))
  def _RunBenchmark(self, tag, input_fn, sizes, ninputs, repeats):
    # One fresh graph per (size, ninput) combination.
    for size in sizes:
      for ninput in ninputs:
        graph = ops.Graph()
        with graph.as_default():
          inputs = input_fn(size, ninput)
        format_args = (tag, size, ninput, repeats)
        self._SetupAndRunBenchmark(graph, inputs, repeats, format_args)
  def benchmarkAccumulateN(self):
    # Entry point: prints a timing table covering every input pattern,
    # tensor size and input count combination.
    self._template = "{:<15}" * 6
    args = {
        "sizes": (128, 128**2),
        "ninputs": (1, 10, 100, 300),
        "repeats": 100
    }
    benchmarks = (("Replicated", self._GenerateReplicatedInputs),
                  ("Unordered", self._GenerateUnorderedInputs),
                  ("Ordered", self._GenerateOrderedInputs),
                  ("Reversed", self._GenerateReversedInputs))
    print(self._template.format("", "Size", "#Inputs", "#Repeat", "Method",
                                "Duration"))
    print("-" * 90)
    for benchmark in benchmarks:
      self._RunBenchmark(*benchmark, **args)
if __name__ == "__main__":
test.main()
|
[
"zulfikar.78.akbar@gmail.com"
] |
zulfikar.78.akbar@gmail.com
|
d68d032e544410088da51547b06a4ca3e587f2b2
|
f78e7917536b5ce8630fcab47428254fe6814591
|
/RIK_simulator/src/lbd_playback/bin/playbackUtils.py
|
12677392f8e812bce5f673014702e0941273b105
|
[] |
no_license
|
xuezhizeng/hwang_robot_works
|
da507993dbfb278e6981304d988d44e263a8b981
|
211f6aede9e5929ca4d174e8d1c28b3b3082752c
|
refs/heads/master
| 2020-03-07T14:31:44.041890
| 2017-09-05T15:25:23
| 2017-09-05T15:25:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,276
|
py
|
import numpy as np
import IK.transformations as T
from usingRosBag_linear import *
from IK.tongsCenter import *
class PlaybackUtils:
    """Helpers for replaying recorded tongs/Vive trajectories: nearest-time
    lookup in the recorded data, frame transforms, and gripper mapping."""
    def __init__(self, vars):
        # Global variables
        # Offset subtracted from recorded positions in transformPosition —
        # presumably aligns the recording with the robot base frame (TODO
        # confirm; an alternative value is kept commented out below).
        self.positionDisplacement = np.array([0,0,-0.22])
        self.vars = vars
        # self.positionDisplacement = np.array([0,0,-0.38])
        self.tongsLength = 0.2286
        self.gripperLength = 0.1524
    def getNextDataColumn(self, time, parser, tongsTransform):
        '''
        :param time: the desired time to be found (in seconds)
        :param parser: the bag parser that contains the data table
        :param tongsTransform: tongs transform object to get center
        :return: pos, quaternion, encoder, force values corresponding to the given time
        '''
        # Snap the requested time to the nearest recorded sample...
        timeIdx = self.find_closest(parser.timeStampArray, time)
        time = parser.resample_time_stamp[timeIdx]
        pos = parser.vivePos_interpolated[timeIdx]
        pos = self.transformPosition(pos)
        quat = parser.viveQuat_interpolated[timeIdx]
        encoder = parser.encoderarray_interpolated[timeIdx]
        # ...and convert the tracker position to the tongs-center position.
        pos = tongsTransform.getCenterPosition(pos,quat,encoder)
        return time, pos, quat, encoder
    def find_closest(self,A, target):
        #A must be sorted
        # Index of the element of A nearest to target: binary search, then
        # the boolean subtraction steps the index back by one when the left
        # neighbour is closer than the right one.
        idx = A.searchsorted(target)
        idx = np.clip(idx, 1, len(A)-1)
        left = A[idx-1]
        right = A[idx]
        idx -= target - left < right - target
        return idx
    def checkValidConfig(self,pos, quat):
        '''
        checks if the given position and orientation are reachable by the ur5 robot arm
        :param pos:
        :param quat:
        :return:
        '''
        # stub
        # Always reports reachable; real reachability check not implemented.
        return True
    def transformPosition(self, pos):
        '''
        rotates the position to the VREP and urscript version of global space
        :param pos:
        :return:
        '''
        # NOTE(review): despite the docstring, no rotation is applied here —
        # only the displacement offset (the rotation line is commented out).
        # posRet = [pos[1],-pos[0],pos[2]]
        posRet = pos
        posRet = np.array(posRet)
        posRet -= self.positionDisplacement
        return posRet.tolist()
    def transformQuat(self, quat):
        '''
        rotates the quaternion to the VREP and urscript version of global space
        :param pos:
        :return:
        '''
        quatMat = T.quaternion_matrix(quat)
        retMat = np.zeros((4,4))
        retMat[:,0] = quatMat[:,1]
        retMat[:,1] = -quatMat[:,0]
        retMat[:,2] = quatMat[:,2]
        # NOTE(review): retMat is built but never used — the input's own
        # matrix is converted straight back, so this method is effectively
        # an identity transform. Confirm whether retMat was meant to be
        # passed to quaternion_from_matrix instead.
        return T.quaternion_from_matrix(quatMat)
    def getGripperValue(self):
        '''
        returns the gripper value (between 0 and 0.085) that corresponds to the encoder value in radians
        :param enocder: encoder value in radians
        :return:
        '''
        # 0 for open, 1 for close
        # NOTE(review): `flag` is a local reset to 0 on every call, so the
        # elif branch (flag == 1) can never execute — encoder values
        # >= 0.035 are returned unchanged. Confirm whether flag was meant
        # to persist on self across calls.
        flag = 0
        pos = self.vars.eeGoalPos
        quat = self.vars.eeGoalOr
        encoder = self.vars.encoderValue
        if encoder < 0.035 and flag == 0:
            encoder = 0.0
            flag = 1
        elif encoder >= 0.035 and flag == 1:
            encoder = 0.085
            flag = 0
        return encoder
'''
distance = self.vars.TongsTransform.getTongsDistance(pos,quat,encoder)
if distance < 0.0:
return 0.0
#if distance > 0.066675:
# return 0.085
u = distance / 0.34
return u*0.085
'''
################## DEPRECATED FUNCTIONS ##############################################
'''
# get the next position and orientation in the file
# note that the position and orientation don't have to be at the same time in the csv
# quaternion is [w,x,y,z]
def getNextHandConfig(filePtr):
DEPRECATED
get the next position and orientation in the file
note that the position and orientation don't have to be at the same time in the csv
quaternion is [w,x,y,z]
:param filePtr:
:return:
posRet = []
quatRet = []
timeRet = []
while posRet == [] or quatRet == []:
line = filePtr.readline()
if line == '':
return [posRet, quatRet, timeRet]
lineArr = line.split(',')
time = extractTime(lineArr[0])
if posRet == []:
posXs = lineArr[1]
posYs = lineArr[2]
posZs = lineArr[3]
if not posXs == '':
posRet = [float(posXs), float(posYs), float(posZs)]
if not posRet == [] and not quatRet == []:
timeRet = time
break
if quatRet == []:
quatWs = lineArr[4]
quatXs = lineArr[5]
quatYs = lineArr[6]
quatZs = lineArr[7]
if not quatWs == '':
quatRet = [float(quatWs),float(quatXs),float(quatYs),float(quatZs)]
if not posRet == [] and not quatRet == []:
timeRet = time
break
posRet = transformPosition(posRet)
quatRet = transformQuat(quatRet)
# posRet = transformToGripperPos(posRet,quatRet)
posRet = tc.getCenterPosition(posRet, quatRet, 0.5)
return [posRet, quatRet, timeRet]
'''
'''
def extractTime(timeLine):
takes in the time header from the bag file and extracts the time
:param timeLine:
:return:
timeArr = timeLine.split(':')
return float(timeArr[-1])
'''
|
[
"hongyiwang@cs.wisc.edu"
] |
hongyiwang@cs.wisc.edu
|
2793bf43cb383c357d70e567eeb97526b4f54f97
|
ba0e07b34def26c37ee22b9dac1714867f001fa5
|
/azure-mgmt-web/azure/mgmt/web/models/network_access_control_entry.py
|
c63dd1cc8d4c8aec142f71ba8f7c5dea394ffc61
|
[
"MIT"
] |
permissive
|
CharaD7/azure-sdk-for-python
|
b11a08ac7d24a22a808a18203072b4c7bd264dfa
|
9fdf0aac0cec8a15a5bb2a0ea27dd331dbfa2f5c
|
refs/heads/master
| 2023-05-12T12:34:26.172873
| 2016-10-26T21:35:20
| 2016-10-26T21:35:20
| 72,448,760
| 1
| 0
|
MIT
| 2023-05-04T17:15:01
| 2016-10-31T15:14:09
|
Python
|
UTF-8
|
Python
| false
| false
| 1,424
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class NetworkAccessControlEntry(Model):
    """NetworkAccessControlEntry.
    :param action: Possible values include: 'Permit', 'Deny'
    :type action: str or :class:`AccessControlEntryAction
     <azure.mgmt.web.models.AccessControlEntryAction>`
    :param description:
    :type description: str
    :param order:
    :type order: int
    :param remote_subnet:
    :type remote_subnet: str
    """
    # Maps Python attribute names to the wire (JSON) keys and types used by
    # msrest (de)serialization.
    _attribute_map = {
        'action': {'key': 'action', 'type': 'AccessControlEntryAction'},
        'description': {'key': 'description', 'type': 'str'},
        'order': {'key': 'order', 'type': 'int'},
        'remote_subnet': {'key': 'remoteSubnet', 'type': 'str'},
    }
    def __init__(self, action=None, description=None, order=None, remote_subnet=None):
        # All fields are optional. NOTE(review): does not call
        # Model.__init__ — this matches the AutoRest generator's output;
        # confirm before "fixing", since the file warns against hand edits.
        self.action = action
        self.description = description
        self.order = order
        self.remote_subnet = remote_subnet
|
[
"lmazuel@microsoft.com"
] |
lmazuel@microsoft.com
|
8fcbfa32a3cb2aab4ae34e37e6ff4a569f55b5ae
|
41c0cbfbe922f09df9c6a4237c06a28ef458761c
|
/1278 B hyper set .py
|
293d5e43a6792d28080181b20ec02d965484ab1d
|
[] |
no_license
|
adityachaudhary147/py-codes
|
8d45bfe3d3b67e4e802a2c1e01199551ef226aaa
|
6a8918b5c6fca19ff74cef0dcd676c04b28ee8c4
|
refs/heads/master
| 2023-04-12T09:52:09.622458
| 2021-05-17T07:44:29
| 2021-05-17T07:44:29
| 244,203,716
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 615
|
py
|
#jai mata di#
import sys
sys.stdin = open('input.in', 'r')
sys.stdout = open('output.out', 'w')
#start the code from here
# "Hyperset": count triples of cards (strings over E/T/S) that form a set —
# in every position the three cards are either all equal or all different.
n, k2 = map(int, input().split())
cards = set()
for _ in range(n):
    cards.add(input())
card_lookup = set(cards)
cards = list(cards)
triple_count = 0
for j in range(n - 1):
    for k in range(j + 1, n):
        # Build the unique third card that would complete a set with
        # cards[j] and cards[k].
        third = [0] * k2
        for pos in range(k2):
            a = cards[j][pos]
            b = cards[k][pos]
            if a == b:
                third[pos] = a
            else:
                # The one letter of E/T/S that differs from both.
                for letter in 'ETS':
                    if letter != a and letter != b:
                        third[pos] = letter
        third = ''.join(third)
        if third in card_lookup:
            triple_count += 1
# Each set is counted once per unordered pair inside it, i.e. 3 times.
print(triple_count // 3)
|
[
"chaudharyaditya.in@gmail.com"
] |
chaudharyaditya.in@gmail.com
|
13e604425bfe67eacff60bf986160796280d1f75
|
c9fe27dd429741f2fd6d567e0aa157871fa89bed
|
/fork/introducer/introducer_api.py
|
efb7a706c223b3bdead2640e2d913a38101611bd
|
[
"Apache-2.0"
] |
permissive
|
Fork-Network/fork-blockchain
|
858d3aefe359a3fff547cf4464f45216b3718fa3
|
4e7c55b5787376dabacc8049eac49c0bb0bfd855
|
refs/heads/main
| 2023-06-23T00:28:14.607265
| 2021-07-24T02:23:22
| 2021-07-24T02:23:22
| 388,574,519
| 7
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,919
|
py
|
from typing import Callable, Optional
from fork.introducer.introducer import Introducer
from fork.protocols.introducer_protocol import RequestPeersIntroducer, RespondPeersIntroducer
from fork.protocols.protocol_message_types import ProtocolMessageTypes
from fork.server.outbound_message import Message, make_msg
from fork.server.ws_connection import WSForkConnection
from fork.types.peer_info import TimestampedPeerInfo
from fork.util.api_decorators import api_request, peer_required
from fork.util.ints import uint64
class IntroducerAPI:
    """Handlers for introducer-protocol messages, backed by `introducer`."""
    introducer: Introducer
    def __init__(self, introducer) -> None:
        self.introducer = introducer
    def _set_state_changed_callback(self, callback: Callable):
        # Intentionally a no-op: the introducer emits no state-change events.
        pass
    @peer_required
    @api_request
    async def request_peers_introducer(
        self,
        request: RequestPeersIntroducer,
        peer: WSForkConnection,
    ) -> Optional[Message]:
        """Reply to a peer's request with up to max_peers_to_send vetted
        peers, or None if the server/peer store is not available yet."""
        max_peers = self.introducer.max_peers_to_send
        # No server or peer store available — nothing to send.
        if self.introducer.server is None or self.introducer.server.introducer_peers is None:
            return None
        # Over-sample (5x) because several candidates get filtered below.
        rawpeers = self.introducer.server.introducer_peers.get_peers(
            max_peers * 5, True, self.introducer.recent_peer_threshold
        )
        peers = []
        for r_peer in rawpeers:
            # Skip peers that have not passed vetting.
            if r_peer.vetted <= 0:
                continue
            # Never send the requester its own address back.
            if r_peer.host == peer.peer_host and r_peer.port == peer.peer_server_port:
                continue
            # Timestamp deliberately zeroed with uint64(0) — presumably to
            # avoid exposing last-seen times; confirm against protocol docs.
            peer_without_timestamp = TimestampedPeerInfo(
                r_peer.host,
                r_peer.port,
                uint64(0),
            )
            peers.append(peer_without_timestamp)
            if len(peers) >= max_peers:
                break
        self.introducer.log.info(f"Sending vetted {peers}")
        msg = make_msg(ProtocolMessageTypes.respond_peers_introducer, RespondPeersIntroducer(peers))
        return msg
|
[
"bekbol17281923@outlook.com"
] |
bekbol17281923@outlook.com
|
fb384863ec5d9329a46985b438244bc624df9626
|
c24212464eb84588edc7903a8905f2a881d578c4
|
/migrations/versions/46e80c86a0fb_.py
|
044aab1785b0ec919d392c0f49889f32428e6dcb
|
[] |
no_license
|
the-akira/Flask-Library
|
c533dc2fd1ac2d3d9e2732e7c7bed5b8cc7ca4bd
|
833e77660053b1e95975ccdf8bf41a035722975c
|
refs/heads/master
| 2023-05-25T12:08:15.898134
| 2023-02-07T23:36:50
| 2023-02-07T23:36:50
| 205,951,022
| 5
| 2
| null | 2023-02-15T22:08:36
| 2019-09-02T23:26:50
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,041
|
py
|
"""empty message
Revision ID: 46e80c86a0fb
Revises: 5ef733a72780
Create Date: 2022-05-14 05:28:36.082684
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '46e80c86a0fb'
down_revision = '5ef733a72780'
branch_labels = None
depends_on = None
def upgrade():
    """Apply migration: link `analysis` rows to users; make book image optional."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('analysis', sa.Column('user_id', sa.Integer(), nullable=True))
    # Nameless FK: Alembic derives the constraint name from naming conventions.
    op.create_foreign_key(None, 'analysis', 'user', ['user_id'], ['id'])
    # image_book becomes nullable going forward.
    op.alter_column('book', 'image_book',
               existing_type=sa.VARCHAR(length=20),
               nullable=True)
    # ### end Alembic commands ###
def downgrade():
    """Revert migration: restore NOT NULL on image_book; drop user link."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('book', 'image_book',
               existing_type=sa.VARCHAR(length=20),
               nullable=False)
    op.drop_constraint(None, 'analysis', type_='foreignkey')
    op.drop_column('analysis', 'user_id')
    # ### end Alembic commands ###
|
[
"gabrielfelippe90@gmail.com"
] |
gabrielfelippe90@gmail.com
|
0af7ed5c92a7008afe7dce8d65ae9ad39ac90809
|
c9500ad778b8521aaa85cb7fe3239989efaa4799
|
/plugins/greynoise/icon_greynoise/actions/get_tag_details/action.py
|
891477fb89da35b5e6cce9779412d9d97fdeb6ce
|
[
"MIT"
] |
permissive
|
rapid7/insightconnect-plugins
|
5a6465e720f114d71b1a82fe14e42e94db104a0b
|
718d15ca36c57231bb89df0aebc53d0210db400c
|
refs/heads/master
| 2023-09-01T09:21:27.143980
| 2023-08-31T10:25:36
| 2023-08-31T10:25:36
| 190,435,635
| 61
| 60
|
MIT
| 2023-09-14T08:47:37
| 2019-06-05T17:05:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,087
|
py
|
import insightconnect_plugin_runtime
from .schema import GetTagDetailsInput, GetTagDetailsOutput, Input, Component
# Custom imports below
from icon_greynoise.util.util import GNRequestFailure
from greynoise.exceptions import RequestFailure
class GetTagDetails(insightconnect_plugin_runtime.Action):
    """Action: look up a GreyNoise tag's metadata by (case-insensitive) name."""

    def __init__(self):
        super(self.__class__, self).__init__(
            name="get_tag_details",
            description=Component.DESCRIPTION,
            input=GetTagDetailsInput(),
            output=GetTagDetailsOutput(),
        )

    def run(self, params={}):
        """Return the metadata dict for the requested tag.

        Falls back to a stub payload ("Tag Not Found") when no tag matches.
        Raises GNRequestFailure when the GreyNoise API call fails.
        """
        # NOTE: mutable default `params={}` kept for plugin-runtime compatibility.
        tag_name = params.get(Input.TAG_NAME).lower()
        output = {}
        try:
            resp = self.connection.gn_client.metadata()
            # Linear scan over all tag metadata for a case-insensitive match.
            for tag in resp["metadata"]:
                if tag["name"].lower() == tag_name:
                    output = tag
        except RequestFailure as e:
            # Re-wrap as the plugin's own error type, preserving code/body.
            raise GNRequestFailure(e.args[0], e.args[1])
        if output:
            return output
        else:
            return {"name": params.get(Input.TAG_NAME), "description": "Tag Not Found"}
|
[
"noreply@github.com"
] |
rapid7.noreply@github.com
|
314851d86273e01bceb3c684924368522f40ba2e
|
9a3b9de5eba5585cff302dde267920269ab338ae
|
/zeus/networks/quant.py
|
eea5ce5ab0b4276fa82f0bbd22fd4222941b3d2e
|
[
"MIT"
] |
permissive
|
Jizhongpeng/xingtian
|
835f5b8d997d5dcdd13a77ad10bc658704892b18
|
a9bdde734f14111854ed666dfdc780d5fe5311b1
|
refs/heads/master
| 2023-01-09T13:07:41.498246
| 2020-11-10T01:24:42
| 2020-11-10T01:24:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,935
|
py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Quantized Convlution."""
import logging
import zeus
from zeus.modules.operators import ops, quant
from zeus.common import ClassFactory, ClassType
@ClassFactory.register(ClassType.SEARCH_SPACE)
class Quantizer(object):
    """Model Quantization class.

    Replaces Conv2d layers of `model` with quantized equivalents using the
    configured weight/activation bit widths.
    """

    def __init__(self, model, nbit_w_list=8, nbit_a_list=8, skip_1st_layer=True):
        """Initialize the quantizer.

        :param model: network whose Conv2d layers will be quantized
        :param nbit_w_list: weight bit width (int) or per-layer list
        :param nbit_a_list: activation bit width (int) or per-layer list
        :param skip_1st_layer: leave the first conv layer unquantized
        """
        super().__init__()
        self.idx = 0  # cursor into the per-layer bit-width lists
        self.nbit_w_list = nbit_w_list
        self.nbit_a_list = nbit_a_list
        self.skip_1st_layer = skip_1st_layer
        self.model = model

    def _next_nbit(self):
        """Get next nbit."""
        # Per-layer lists advance a shared cursor; scalar values are reused.
        if isinstance(self.nbit_w_list, list) and isinstance(self.nbit_a_list, list):
            nbit_w, nbit_a = self.nbit_w_list[self.idx], self.nbit_a_list[self.idx]
            self.idx += 1
        else:
            nbit_w, nbit_a = self.nbit_w_list, self.nbit_a_list
        return nbit_w, nbit_a

    def _quant_conv(self, model):
        """Quantize the convolutional layer."""
        # Non-conv layers pass through untouched.
        if not isinstance(model, ops.Conv2d):
            return model
        nbit_w, nbit_a = self._next_nbit()
        quant_model = quant.QuantConv(model.in_channels, model.out_channels, model.kernel_size,
                                      model.stride, model.padding, model.dilation, model.groups, model.bias)
        quant_model.build(nbit_w=nbit_w, nbit_a=nbit_a)
        if zeus.is_torch_backend():
            # 8-bit weights on the torch backend use the dedicated op instead.
            if nbit_w == 8:
                quant_model = ops.QuantizeConv2d(model.in_channels, model.out_channels, model.kernel_size,
                                                 model.stride, model.padding, model.dilation, model.groups,
                                                 quant_bit=nbit_w)
        return quant_model

    def __call__(self):
        """Quantize the entire model."""
        if self.nbit_w_list is None or self.nbit_a_list is None:
            logging.warning("nbit_w or nbit_a is None, model can not be quantified.")
            return self.model
        is_first_conv = True
        for name, layer in list(self.model.named_modules()):
            # NOTE(review): this skips non-Conv2d layers only while
            # skip_1st_layer is True; when it is False, non-conv layers fall
            # through to _quant_conv (a no-op for them) — confirm intended.
            if not isinstance(layer, ops.Conv2d) and self.skip_1st_layer:
                continue
            # Skip the first conv encountered (input layer stays full precision).
            if is_first_conv:
                is_first_conv = False
                continue
            quant_conv = self._quant_conv(layer)
            self.model.set_module(name, quant_conv)
        return self.model

    def custom_hooks(self):
        """Calculate flops and params."""
        return quant.quant_custom_ops()
|
[
"hustqj@126.com"
] |
hustqj@126.com
|
2db06d443b7fadfd5bf1b848b96ecd5dfcfcd003
|
10d98fecb882d4c84595364f715f4e8b8309a66f
|
/linear_dynamical_systems/iterated_regression.py
|
86e5d1169c94d3a0da489858d906ac228f583206
|
[
"Apache-2.0",
"CC-BY-4.0"
] |
permissive
|
afcarl/google-research
|
51c7b70d176c0d70a5ee31ea1d87590f3d6c6f42
|
320a49f768cea27200044c0d12f394aa6c795feb
|
refs/heads/master
| 2021-12-02T18:36:03.760434
| 2021-09-30T20:59:01
| 2021-09-30T21:07:02
| 156,725,548
| 1
| 0
|
Apache-2.0
| 2018-11-08T15:13:53
| 2018-11-08T15:13:52
| null |
UTF-8
|
Python
| false
| false
| 5,392
|
py
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Regularized iterated regression for estimating AR parameters in ARMA models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from statsmodels.regression.linear_model import OLS
from statsmodels.tools.tools import add_constant
from statsmodels.tsa.tsatools import lagmat
def fit_arparams_iter(outputs, inputs, p, q, r, l2_reg=0.0):
  """Iterative regression for estimating AR params in ARMAX(p, q, r) model.

  The iterative AR regression process provides consistent estimates for the
  AR parameters of an ARMAX(p, q, r) model after q iterative steps.
  It first fits an ARMAX(p, 0, r) model with least squares regression, then
  ARMAX(p, 1, r), and so on, til ARMAX(p, q, r). At the i-th step, it
  fits an ARMAX(p, i, r) model, according to estimated error terms from the
  previous step.

  For description of the iterative regression method, see Section 2 of
  `Consistent Estimates of Autoregressive Parameters and Extended Sample
  Autocorrelation Function for Stationary and Nonstationary ARMA Models` at
  https://www.jstor.org/stable/2288340.

  The implementation here is a generalization of the method mentioned in the
  paper. We adapt the method for multidimensional outputs, exogenous inputs,
  nan handling, and also add regularization on the MA parameters.

  Args:
    outputs: Array with the output values from the LDS, nans allowed.
    inputs: Array with exogenous inputs values, nans allowed. Could be None.
    p: AR order, i.e. max lag of the autoregressive part.
    q: MA order, i.e. max lag of the error terms.
    r: Max lag of the exogenous inputs.
    l2_reg: L2 regularization coefficient, to be applied on MA coefficients.

  Returns:
    Fitted AR coefficients.
  """
  if outputs.shape[1] > 1:
    # If there are multiple output dimensions, fit autoregressive params on
    # each dimension separately and average.
    # Bug fix: `xrange` is Python 2 only (NameError on Python 3); use `range`.
    params_list = [
        fit_arparams_iter(outputs[:, j:j+1], inputs, p, q, r, l2_reg=l2_reg)
        for j in range(outputs.shape[1])]
    return np.mean(
        np.concatenate([a.reshape(1, -1) for a in params_list]), axis=0)
  # We include a constant term in regression.
  k_const = 1
  # Input dim. If inputs is None, then in_dim = 0.
  in_dim = 0
  if inputs is not None:
    in_dim = inputs.shape[1]
    # Lag the inputs to obtain [?, r], column j means series x_{t-j}.
    # Use trim to drop rows with unknown values both at beginning and end.
    lagged_in = np.concatenate(
        [lagmat(inputs[:, i], maxlag=r, trim='both') for i in range(in_dim)],
        axis=1)
    # Since we trim in beginning, the offset is r.
    lagged_in_offset = r
  # Lag the series itself to p-th order.
  lagged_out = lagmat(outputs, maxlag=p, trim='both')
  lagged_out_offset = p
  y = outputs
  y_offset = 0
  # Estimated residuals, initialized to 0.
  res = np.zeros_like(outputs)
  for i in range(q + 1):
    # Lag the residuals to i-th order in i-th iteration.
    lagged_res = lagmat(res, maxlag=i, trim='both')
    lagged_res_offset = y_offset + i
    # Compute offset in regression, since lagged_in, lagged_out, and lagged_res
    # have different offsets. Align them.
    if inputs is None:
      y_offset = max(lagged_out_offset, lagged_res_offset)
    else:
      y_offset = max(lagged_out_offset, lagged_res_offset, lagged_in_offset)
    y = outputs[y_offset:, :]
    # Concatenate all variables in regression.
    x = np.concatenate([
        lagged_out[y_offset - lagged_out_offset:, :],
        lagged_res[y_offset - lagged_res_offset:, :]
    ],
                       axis=1)
    if inputs is not None:
      x = np.concatenate([lagged_in[y_offset - lagged_in_offset:, :], x],
                         axis=1)
    # Add constant term as the first variable.
    x = add_constant(x, prepend=True)
    if x.shape[1] < k_const + in_dim * r + p + i:
      raise ValueError('Insufficient sequence length for model fitting.')
    # Drop rows with nans.
    arr = np.concatenate([y, x], axis=1)
    arr = arr[~np.isnan(arr).any(axis=1)]
    y_dropped_na = arr[:, 0:1]
    x_dropped_na = arr[:, 1:]
    # Only regularize the MA part.
    alpha = np.concatenate(
        [np.zeros(k_const + in_dim * r + p), l2_reg * np.ones(i)], axis=0)
    # When L1_wt = 0, it's ridge regression.
    olsfit = OLS(y_dropped_na, x_dropped_na).fit_regularized(
        alpha=alpha, L1_wt=0.0)
    # Update estimated residuals.
    res = y - np.matmul(x, olsfit.params.reshape(-1, 1))
  if len(olsfit.params) != k_const + in_dim * r + p + q:
    raise ValueError('Expected param len %d, got %d.' %
                     (k_const + in_dim * r + p + q, len(olsfit.params)))
  if q == 0:
    return olsfit.params[-p:]
  return olsfit.params[-(p + q):-q]
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
59fc04f65d00227209003ea2836478c904403fdb
|
34f365117eb1d846fa922c24f3fc650188ce9746
|
/bin/bed2countString.py
|
a029e30899b4529ca74ab313a5ed6655280c54f6
|
[
"MIT"
] |
permissive
|
PinarSiyah/NGStoolkit
|
53ac6d87a572c498414a246ae051785b40fbc80d
|
b360da965c763de88c9453c4fd3d3eb7a61c935d
|
refs/heads/master
| 2021-10-22T04:49:51.153970
| 2019-03-08T08:03:28
| 2019-03-08T08:03:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 744
|
py
|
#!/usr/bin/env python
"""Count occurrences of a string within each BED interval's sequence.

Reads a BED file, counts `string` in each interval's fasta sequence,
appends the count as an extra column, and writes each line to the output.
"""
import os
import sys
import bed
import generalUtils
import argparse
import random

parser = argparse.ArgumentParser(description='count strings')
parser.add_argument('-i', required= True, help='input')
parser.add_argument('-o', required= True, help='output')
parser.add_argument('-fasta', required= True, help='fasta reference')
parser.add_argument('-string', required= True, help='string to count')
args = parser.parse_args()

bedFile = args.i
output = args.o
fasta = args.fasta
string = args.string

bedObject = bed.bed(bedFile)
# Bug fix: `out` was never defined — the output file handle was missing,
# so out.write(...) raised NameError. Open it (and close via `with`).
with open(output, 'w') as out:
    for bedline in bedObject.read():
        count = bedline.countString(fasta, string)
        fields = bedline.fields()
        fields.append(str(count))
        line = bedline.fields2line(fields)
        out.write(line + '\n')
|
[
"adebali@users.noreply.github.com"
] |
adebali@users.noreply.github.com
|
ffa90cad695dc1a89745ab2dd5a57a1006b45f38
|
3716e91c0a18a2cf0b5807cc673d95a7539b008c
|
/Forest/BackwoodsFork.py
|
f295233eb6d3ecd0680e498a956ebd483a4ff8fb
|
[] |
no_license
|
kiwiapple87/CodeCombat-1
|
47f0fa6d75d6d3e9fb9c28feeb6fe2648664c1aa
|
ce0201e5ed099193ca40afd3b7abeee5a3732387
|
refs/heads/master
| 2021-05-01T16:38:03.575842
| 2016-08-25T11:13:26
| 2016-08-25T11:13:26
| 66,552,813
| 1
| 0
| null | 2016-08-25T11:39:20
| 2016-08-25T11:39:18
| null |
UTF-8
|
Python
| false
| false
| 818
|
py
|
# https://codecombat.com/play/level/backwoods-fork
# Incoming ogres!
# Use the checkAndAttack function to make your code easy to read.
# This function has a parameter.
# A parameter is a way of passing information into a function.
def checkAndAttack(target):
    # The 'target' parameter is just a variable!
    # It contains the argument when the function was called.
    if target:
        hero.attack(target)
    # After (possibly) attacking, return to the central rally point.
    hero.moveXY(43, 34)

while True:
    # Check the top X mark for an enemy, then attack-and-return.
    hero.moveXY(58, 52)
    topEnemy = hero.findNearestEnemy()
    checkAndAttack(topEnemy)
    # Same routine for the lower X mark.
    hero.moveXY(58, 16)
    topEnemy = hero.findNearestEnemy()
    checkAndAttack(topEnemy)
    # Move to the bottom X mark.
    # Create a variable named bottomEnemy and find the nearest enemy.
    # Use the checkAndAttack function, and include the bottomEnemy variable.
|
[
"vadim-job-hg@yandex.ru"
] |
vadim-job-hg@yandex.ru
|
e1dc4656c4add693f2dbb8e3c9a87990852101c8
|
dccdb71dd75560ffeb076cedbd78b36e33b3adf2
|
/EstruturaSequencial/ex02_numero.py
|
5a2f85ce99f95f27163e7a478296cece3c9b0762
|
[] |
no_license
|
IngridFCosta/exerciciosPythonBrasil
|
6fa669896cae135142101c522be7a919bda583b7
|
5c84d70f720b45a260320b08a5103bad5ce78339
|
refs/heads/master
| 2023-04-22T02:34:40.206350
| 2021-05-06T17:14:50
| 2021-05-06T17:14:50
| 291,265,097
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 194
|
py
|
"""2-Faça um Programa que peça um número e então mostre a mensagem O número informado foi [número]."""
numero=int(input('Escreva um número: '))
print(f'O número informado foi {numero}')
|
[
"49729290+IngridFCosta@users.noreply.github.com"
] |
49729290+IngridFCosta@users.noreply.github.com
|
a4d90d95aa7d47485090ac92009ccbef391cdfec
|
5dfb9ca5e0c8cb4cb7a7a92d6f6a34b34a841869
|
/LeetCodeSolutions/python/513_Find_Bottom_Left_Tree_Value.py
|
808427669e6aa627ab9f1b0b0ec2c32986e01b6e
|
[
"MIT"
] |
permissive
|
ChuanleiGuo/AlgorithmsPlayground
|
2f71d29e697a656562e3d2a2de783d964dc6a325
|
90b6287b742c8bfd3797540c408d679be2821a40
|
refs/heads/master
| 2021-01-11T18:30:43.218959
| 2018-11-19T02:20:31
| 2018-11-19T02:20:31
| 79,550,052
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 432
|
py
|
# Definition for a binary tree node.
class TreeNode(object):
    """Binary tree node holding a value and optional left/right children."""

    def __init__(self, x):
        self.val = x
        self.left = self.right = None
class Solution(object):
    def findBottomLeftValue(self, root):
        """Return the value of the leftmost node on the deepest level.

        :type root: TreeNode
        :rtype: int
        """
        # Breadth-first walk, enqueueing right before left, so the last node
        # dequeued is the leftmost node of the bottom level.
        pending = [root]
        cursor = 0
        while cursor < len(pending):
            node = pending[cursor]
            cursor += 1
            for child in (node.right, node.left):
                if child:
                    pending.append(child)
        return node.val
|
[
"chuanleiguo@gmail.com"
] |
chuanleiguo@gmail.com
|
77ff1c2b53c17c875b84bb4f6b5a2944e786527e
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_207/ch136_2020_04_01_11_57_23_991207.py
|
d8760caf5639fb9f950467abccac43d652a78617
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,282
|
py
|
# Dice guessing game: the player pays coins for hints and wins by guessing
# the sum of three hidden six-sided dice.
import random
dinheiros = 10  # player's coins
d1 = random.randint(1,6)
d2 = random.randint(1,6)
d3 = random.randint(1,6)
soma = d1+d2+d3
# NOTE(review): printing the sum reveals the answer — presumably a debug line.
print (soma)
JOGO = False
# Hint phase (original comment: "fase de DICAS").
print ("DICAS: \n Você possui {0} dinheiros." .format(dinheiros))
if dinheiros <=0:
    print ("Você perdeu!")
pergunta1 = input("Você quer uma dica? ")
if pergunta1 == 'sim':
    # A hint costs one coin: the player names three candidate sums and is
    # told whether the real sum is among them.
    dinheiros -= 1
    perguntaA = int(input ("Me diga um valor possível para a soma "))
    perguntaB = int(input ("Me diga outro valor possível para a soma "))
    perguntaC = int(input ("Me diga outro valor possível para a soma "))
    if perguntaA == soma:
        print ("Está entre os 3")
    elif perguntaB == soma:
        print ("Está entre os 3")
    elif perguntaC == soma:
        print ("Está entre os 3")
    else:
        print ("Não está entre os 3")
JOGO = True
# Guessing loop: each wrong guess costs one coin; a correct guess multiplies
# the remaining coins and ends the game.
while JOGO:
    if dinheiros <=0:
        print ("Você perdeu!")
        JOGO = False
    else:
        print(" {0} dinheiros disponível" .format(dinheiros))
        resposta = int(input ("Qual o valor da soma?"))
        if resposta == soma:
            dinheiros += 5*dinheiros
            print ("Você ganhou o jogo com {0} dinheiros" .format (dinheiros))
            JOGO = False
        else:
            dinheiros -= 1
|
[
"you@example.com"
] |
you@example.com
|
e3feca8063a41c8988af243eef5ea642b26fe9c5
|
4a74875c7366a19b7189fcb89fa0fa27abc4309e
|
/data_pipeline/processor/oracle_where_parser.py
|
60562db03590c59c92acd5b85aa9bc775183f86c
|
[
"Apache-2.0"
] |
permissive
|
saubury-iag/data_pipeline
|
d865d66d25eeb4ea6c6a655ae934bfe83c0efa06
|
4ad04198ed48c643045113c6e2c3e0848adbdec6
|
refs/heads/master
| 2021-07-23T08:43:46.754162
| 2017-11-01T05:05:23
| 2017-11-01T05:05:23
| 108,808,749
| 0
| 0
| null | 2017-10-30T06:06:41
| 2017-10-30T06:06:41
| null |
UTF-8
|
Python
| false
| false
| 5,101
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
###############################################################################
# Module: oracle_where_parser
# Purpose: Parses Where component of LogMiner SQL statements
#
# Notes:
#
###############################################################################
from enum import Enum
import data_pipeline.constants.const as const
from .oracle_base_parser import OracleBaseParser
WhereState = Enum('WhereState', 'start key operator value')
class OracleWhereParser(OracleBaseParser):
    """State machine parsing the WHERE clause of a LogMiner SQL statement.

    Walks the committed statement character by character through the states
    start -> key -> operator -> value, recording key/value conditions on the
    active statement object.
    """

    def __init__(self):
        super(OracleWhereParser, self).__init__()
        self._primarykey_list = None  # upper-cased PK column names, or None
        self._curr_key = None         # column name currently being parsed
        self._statement = None        # statement receiving parsed conditions

    def _set_primary_keys_from_string(self, pkstr, table_name):
        """Parse a comma-separated PK string; blank input leaves keys unset."""
        pkstr = pkstr.strip()
        if pkstr:
            if pkstr == const.NO_KEYFIELD_STR:
                # Sentinel meaning the table has no key fields.
                self._primarykey_list = []
            else:
                self._primarykey_list = [pk.strip().upper()
                                         for pk in pkstr.split(const.COMMA)]
            self._logger.debug(
                "[{table}] Set primary keys = {pks}"
                .format(table=table_name, pks=self._primarykey_list))
        return self._primarykey_list

    def set_primary_keys(self, value):
        self._primarykey_list = value

    def set_statement(self, value):
        self._statement = value

    def parse(self):
        """Scan the commit statement and record every WHERE condition."""
        self._char_buff = const.EMPTY_STRING
        self._parsing_state = WhereState.start
        while self._read_cursor < len(self._commit_statement):
            if self._can_consume_char():
                self._consume()
            self._transition_where_parsing_state()
            self._next()

    def _can_consume_char(self):
        # Characters are buffered only while inside a key or value token.
        return (self._parsing_state == WhereState.key or
                self._parsing_state == WhereState.value)

    def _transition_where_parsing_state(self):
        # Dispatch to the handler for the current parsing state.
        if self._parsing_state == WhereState.start:
            self._transition_from_where_start_parsing()
        elif self._parsing_state == WhereState.key:
            self._transition_from_where_key_parsing()
        elif self._parsing_state == WhereState.operator:
            self._transition_from_where_operator_parsing()
        elif self._parsing_state == WhereState.value:
            self._transition_from_where_value_parsing()

    def _transition_from_where_start_parsing(self):
        # A double quote opens a key (column) name.
        if self._at_key_word_boundary():
            self._empty_buffer()
            self._parsing_state = WhereState.key

    def _transition_from_where_key_parsing(self):
        # The closing double quote completes the key name.
        if self._at_key_word_boundary():
            self._curr_key = self._flush_buffer()
            self._parsing_state = WhereState.operator

    def _transition_from_where_operator_parsing(self):
        is_null_index = self._commit_statement.find(const.IS_NULL,
                                                    self._read_cursor)
        if self._read_cursor == is_null_index:
            # Prefer using None over const.IS_NULL in case a
            # key's value is actually 'IS NULL'
            self._statement.add_condition(self._curr_key, None)
            self._read_cursor += len(const.IS_NULL)
            self._empty_buffer()
            self._parsing_state = WhereState.start
        elif self._at_value_word_boundary():
            # A single quote opens the value token.
            self._empty_buffer()
            self._parsing_state = WhereState.value

    def _transition_from_where_value_parsing(self):
        if self._at_escaped_single_quote():
            # Step over escaped quotes inside the value.
            self._next()
        elif self._at_value_word_boundary():
            where_value = self._flush_buffer()
            self._statement.add_condition(self._curr_key, where_value)
            self._parsing_state = WhereState.start

    def _can_add_primary_key(self):
        # True when keys are unknown, the table declared no key fields, or
        # the current column is one of the known primary keys.
        return (self._primarykey_list is None or
                const.NO_KEYFIELD_STR in self._primarykey_list or
                self._curr_key.upper() in self._primarykey_list)

    def _at_key_word_boundary(self):
        return self._curr_char == const.DOUBLE_QUOTE

    def _at_value_word_boundary(self):
        return self._curr_char == const.SINGLE_QUOTE

    def _flush_buffer(self):
        # Always remove last char which is only useful when
        # there are escaped single-quotes
        return super(OracleWhereParser, self)._flush_buffer()[:-1]
|
[
"simon.aubury@iag.com.au"
] |
simon.aubury@iag.com.au
|
b15f2abae8cd733f498374b1a2d0c477cd073e9a
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/482/usersdata/309/110136/submittedfiles/Av2_Parte4.py
|
4caf25849b6aea491973c78fc097c44d88370f19
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 320
|
py
|
# -*- coding: utf-8 -*-
# Read a lin x colu integer matrix from the user and print it.
mt=[]
lin=int(input("Digite a quantidade de linhas da matriz:"))
colu=int(input("Digite a quantidade de colunas da matriz:"))
for i in range(lin):
    lista=[]
    for j in range(colu):
        # Bug fix: the original assigned lista[j] / mt[i] into empty lists,
        # which raises IndexError; build the rows and matrix with append.
        lista.append(int(input("Digite um elemento para sua matriz:")))
    mt.append(lista)
print(mt)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
3a3a2bda3c9dc71fb5f213b93e4215892c92b19a
|
354e2dede869a32e57c66c2e1b71bbd61d2cc36d
|
/widgets/pad.py
|
17d2e11a21f2a29919b0d0f7268c7bfef1cc107d
|
[
"MIT"
] |
permissive
|
hiddenvs/micropython_ra8875
|
5abf906d95c0c997ceb9638b8785c4cc2f803cf6
|
a61314d62d6add831f6618c857b01d1a5b7ce388
|
refs/heads/master
| 2023-04-09T22:37:23.097440
| 2021-04-16T13:13:34
| 2021-04-16T13:13:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,750
|
py
|
# pad.py Extension to lcd160gui providing the invisible touchpad class
# Released under the MIT License (MIT). See LICENSE.
# Copyright (c) 2020 Peter Hinch
# Usage: import classes as required:
# from gui.widgets.pad import Pad
import uasyncio as asyncio
from micropython_ra8875.py.ugui import Touchable
from micropython_ra8875.primitives.delay_ms import Delay_ms
# Pad coordinates relate to bounding box (BB). x, y are of BB top left corner.
# likewise width and height refer to BB
class Pad(Touchable):
    """Invisible touch-sensitive region.

    Fires `callback` on touch or on release (per `onrelease`) and an optional
    `lp_callback` when the touch is held for `long_press_time` ms.
    """

    long_press_time = 1000  # ms a touch must be held to count as a long press

    def __init__(self, location, *, height=20, width=50, onrelease=True,
                 callback=None, args=[], lp_callback=None, lp_args=[]):
        # NOTE(review): mutable defaults (args=[], lp_args=[]) are shared
        # between instances — safe only while callers never mutate them.
        super().__init__(location, None, height, width, None, None, None, None, False, '', None)
        # Default callback is a no-op so _touched/_untouched need no None check.
        self.callback = (lambda *_: None) if callback is None else callback
        self.callback_args = args
        self.onrelease = onrelease
        self.lp_callback = lp_callback
        self.lp_args = lp_args
        self.lp_task = None  # Long press not in progress

    def show(self):
        # Invisible widget: nothing to draw.
        pass

    def _touched(self, x, y):  # Process touch
        # Arm the long-press timer if a long-press callback was supplied.
        if self.lp_callback is not None:
            self.lp_task = asyncio.create_task(self.longpress())
        if not self.onrelease:
            self.callback(self, *self.callback_args)  # Callback not a bound method so pass self

    def _untouched(self):
        # Released before the long-press timer fired: cancel it.
        if self.lp_task is not None:
            self.lp_task.cancel()
            self.lp_task = None
        if self.onrelease:
            self.callback(self, *self.callback_args)  # Callback not a bound method so pass self

    async def longpress(self):
        # Runs as a task; fires the long-press callback unless cancelled.
        await asyncio.sleep_ms(Pad.long_press_time)
        self.lp_callback(self, *self.lp_args)
|
[
"peter@hinch.me.uk"
] |
peter@hinch.me.uk
|
0c9daec6558d9de88338199192f4e5d1dad32639
|
8dca64dd11b23a7d59413ac8e28e92a0ab80c49c
|
/832. Flipping an Image/solution.py
|
25b69e5fc050245a58bc16a757d3ea451316a7c4
|
[] |
no_license
|
huangruihaocst/leetcode-python
|
f854498c0a1d257698e10889531c526299d47e39
|
8f88cae7cc982ab8495e185914b1baeceb294060
|
refs/heads/master
| 2020-03-21T20:52:17.668477
| 2018-10-08T20:29:35
| 2018-10-08T20:29:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 689
|
py
|
class Solution:
    def flipAndInvertImage(self, A):
        """Flip each row horizontally, then invert every pixel (0 <-> 1).

        :type A: List[List[int]]
        :rtype: List[List[int]]
        """
        return self.invert(self.reverse(A))

    @staticmethod
    def reverse(A):
        """Return a copy of the image with every row reversed.

        :type A: List[List[int]]
        :rtype: List[List[int]]
        """
        return [row[::-1] for row in A]

    @staticmethod
    def invert(A):
        """Return a copy of the image with every pixel inverted.

        :type A: List[List[int]]
        :rtype: List[List[int]]
        """
        return [[1 - pixel for pixel in row] for row in A]
if __name__ == '__main__':
    # Ad-hoc smoke test using the LeetCode 832 sample input.
    s = Solution()
    print(s.flipAndInvertImage([[1,1,0,0],[1,0,0,1],[0,1,1,1],[1,0,1,0]]))
|
[
"huangruihaocst@126.com"
] |
huangruihaocst@126.com
|
8655a86f4d99e4dc3d21d0a740e200eda1cd4269
|
66643f48950453dd1cc408a763360db2be9942f6
|
/tests/validation/test_executable_definitions.py
|
4a21c63b65e4397cb38e995fa01b69400852109f
|
[
"MIT"
] |
permissive
|
khasbilegt/graphql-core
|
ac958b5a68c27acd0c7f96429deeca7f7f8736b3
|
fc76d01a2a134ba2cebd863bf48773fd44c2645b
|
refs/heads/main
| 2023-08-05T06:03:56.299244
| 2021-09-19T10:31:30
| 2021-09-19T10:31:30
| 408,735,141
| 1
| 0
|
MIT
| 2021-09-21T08:00:36
| 2021-09-21T08:00:35
| null |
UTF-8
|
Python
| false
| false
| 2,234
|
py
|
from functools import partial
from graphql.validation import ExecutableDefinitionsRule
from .harness import assert_validation_errors
assert_errors = partial(assert_validation_errors, ExecutableDefinitionsRule)
assert_valid = partial(assert_errors, errors=[])
def describe_validate_executable_definitions():
    """ExecutableDefinitionsRule: only operations and fragments may appear."""

    def with_only_operation():
        # A lone query operation is executable and therefore valid.
        assert_valid(
            """
            query Foo {
              dog {
                name
              }
            }
            """
        )

    def with_operation_and_fragment():
        # Operations plus fragment definitions are both executable.
        assert_valid(
            """
            query Foo {
              dog {
                name
                ...Frag
              }
            }

            fragment Frag on Dog {
              name
            }
            """
        )

    def with_type_definition():
        # SDL type definitions mixed into an executable document are errors.
        assert_errors(
            """
            query Foo {
              dog {
                name
              }
            }

            type Cow {
              name: String
            }

            extend type Dog {
              color: String
            }
            """,
            [
                {
                    "message": "The 'Cow' definition is not executable.",
                    "locations": [(8, 13)],
                },
                {
                    "message": "The 'Dog' definition is not executable.",
                    "locations": [(12, 13)],
                },
            ],
        )

    def with_schema_definition():
        # Schema definitions/extensions are likewise not executable.
        assert_errors(
            """
            schema {
              query: Query
            }

            type Query {
              test: String
            }

            extend schema @directive
            """,
            [
                {
                    "message": "The schema definition is not executable.",
                    "locations": [(2, 13)],
                },
                {
                    "message": "The 'Query' definition is not executable.",
                    "locations": [(6, 13)],
                },
                {
                    "message": "The schema definition is not executable.",
                    "locations": [(10, 13)],
                },
            ],
        )
|
[
"cito@online.de"
] |
cito@online.de
|
6d8b0b5732d62a9cf3767687b8e8763bebd74f0c
|
0c154919a427bad71598bae69a8b5b6b94bc2627
|
/Posts/migrations/0008_post_vote.py
|
1f9c7557cad3250dbbcd9af07789b7d0075280aa
|
[] |
no_license
|
MichalDrosio/blog
|
b7b56ffd35ef82d2a5c3c791eff8d326644f5627
|
ebb7c07f29ea9de1f872bfeea79ae6d0c4822766
|
refs/heads/master
| 2020-12-22T13:02:54.573249
| 2020-07-27T11:25:16
| 2020-07-27T11:25:16
| 236,775,255
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 385
|
py
|
# Generated by Django 3.0.8 on 2020-07-24 10:07
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a non-negative `vote` counter (default 0) to the Post model."""

    dependencies = [
        ('Posts', '0007_auto_20200723_1357'),
    ]

    operations = [
        migrations.AddField(
            model_name='post',
            name='vote',
            field=models.PositiveIntegerField(default=0),
        ),
    ]
|
[
"drosio.michal@gmail.com"
] |
drosio.michal@gmail.com
|
629bb2ace85054f0e0e1bf16e25f739810db624e
|
0e834094f5e4274b279939b81caedec7d8ef2c73
|
/m3/django_/mysite3/book/migrations/0003_auto_20200115_1745.py
|
e90a76a3af2a4b4753b0bf2a493db5ffb36cf7a9
|
[] |
no_license
|
SpringSnowB/All-file
|
b74eaebe1d54e1410945eaca62c70277a01ef0bf
|
03485c60e7c07352aee621df94455da3d466b872
|
refs/heads/master
| 2020-11-27T23:54:36.984555
| 2020-01-21T08:42:21
| 2020-01-21T08:42:21
| 229,651,737
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,176
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2020-01-15 09:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the Author model and add market_price/pub columns to Book."""

    dependencies = [
        ('book', '0002_auto_20200115_1603'),
    ]

    operations = [
        # verbose_name strings are user-facing (Chinese) labels — keep as-is.
        migrations.CreateModel(
            name='Author',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20, verbose_name='姓名')),
                ('age', models.IntegerField(default=1, verbose_name='年龄')),
                ('email', models.EmailField(max_length=254, null=True, verbose_name='邮箱')),
            ],
        ),
        migrations.AddField(
            model_name='book',
            name='market_price',
            field=models.DecimalField(decimal_places=2, default=0, max_digits=7, verbose_name='图书零售价'),
        ),
        migrations.AddField(
            model_name='book',
            name='pub',
            field=models.CharField(default='', max_length=30, verbose_name='出版社'),
        ),
    ]
|
[
"tszxwsb@163.com"
] |
tszxwsb@163.com
|
6267d925f4ba5879b734a6dcba1640ccedeb8b48
|
a2eaa3decc385dea227da8a99203f767f32cf941
|
/electronic_station/ascending_list.py
|
98a5261f1acf37986d291eb1cd62bca1a61480d4
|
[] |
no_license
|
vlad-bezden/py.checkio
|
94db32111eeeb2cd90c7b3c4606ea72cf2bb6678
|
6cd870ca3056cc9dcdce0ad520c27e92311719b3
|
refs/heads/master
| 2021-07-01T18:39:35.955671
| 2020-10-05T00:56:38
| 2020-10-05T00:56:38
| 93,111,389
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,075
|
py
|
"""Ascending List
https://py.checkio.org/en/mission/ascending-list/
Determine whether the sequence of elements items is ascending so that its
each element is strictly larger than (and not merely equal to) the
element that precedes it.
Input: Iterable with ints.
Output: bool
Output:
is_ascending_all([-5, 10, 99, 123456]). Exec time = 0.000124
is_ascending_set([-5, 10, 99, 123456]). Exec time = 0.000084
is_ascending_all([99]). Exec time = 0.000082
is_ascending_set([99]). Exec time = 0.000057
is_ascending_all([4, 5, 6, 7, 3, 7, 9]). Exec time = 0.000142
is_ascending_set([4, 5, 6, 7, 3, 7, 9]). Exec time = 0.000081
is_ascending_all([]). Exec time = 0.000090
is_ascending_set([]). Exec time = 0.000047
is_ascending_all([1, 1, 1, 1]). Exec time = 0.000103
is_ascending_set([1, 1, 1, 1]). Exec time = 0.000061
Conclusion: using set works faster than iterating using zip
"""
from typing import Callable, Sequence
from timeit import repeat
from dataclasses import dataclass
@dataclass
class Test:
    """One validation case for the ascending-check functions."""
    data: Sequence[int]  # input sequence handed to each function under test
    expected: bool  # True iff *data* is strictly ascending
TESTS = [
Test([-5, 10, 99, 123456], True),
Test([99], True),
Test([4, 5, 6, 7, 3, 7, 9], False),
Test([], True),
Test([1, 1, 1, 1], False),
]
def is_ascending_set(items: Sequence[int]) -> bool:
    """Return True iff *items* is strictly ascending.

    Works by deduplicating (a strictly ascending sequence has no repeats)
    and sorting: the result equals the original order only when the input
    was already sorted and duplicate-free.

    Fix: ``sorted`` always returns a ``list``, so the original comparison
    ``sorted(set(items)) == items`` was unconditionally False for any
    non-list Sequence (e.g. a tuple). Comparing against ``list(items)``
    keeps list behavior identical while accepting any Sequence.
    """
    return sorted(set(items)) == list(items)
def is_ascending_zip(items: Sequence[int]) -> bool:
    """Return True iff every element strictly exceeds its predecessor.

    Walks adjacent pairs; an empty or single-element sequence has no
    pairs and is therefore considered ascending.
    """
    for previous, current in zip(items, items[1:]):
        if current <= previous:
            return False
    return True
def validate(funcs: Sequence[Callable[[Sequence[int]], bool]]) -> None:
    """Assert each function in *funcs* returns the expected verdict for every
    case in the module-level TESTS list; prints a banner when all pass.

    Raises AssertionError on the first mismatch (message uses the f-string
    ``{result = }`` debug form, so it shows the variable name and value).
    """
    for test in TESTS:
        for f in funcs:
            result = f(test.data)
            assert result == test.expected, f"{f.__name__}({test}), {result = }"
    print("PASSED!!!\n")
if __name__ == "__main__":
funcs = [is_ascending_zip, is_ascending_set]
validate(funcs)
for test in TESTS:
for f in funcs:
t = repeat(stmt=f"f({test.data})", repeat=5, number=100, globals=globals())
print(f"{f.__name__}({test.data}). Exec time = {min(t):.6f}")
print()
|
[
"vlad.bezden@gmail.com"
] |
vlad.bezden@gmail.com
|
b03c20c06b007718fdc3396885410fb6db0e19f2
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/hQRuQguN4bKyM2gik_21.py
|
d59885f89bac86c7d6d3eae679f0d6dbfb33395c
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 195
|
py
|
def simple_check(a, b):
    """Count divisibility hits while sliding both values downward.

    Starting from low = min(a, b) and high = max(a, b), performs exactly
    min(a, b) iterations; on each one it counts a hit when the current
    high is divisible by the current low, then decrements both.
    """
    low, high = sorted((a, b))
    matches = 0
    # range() captures the initial low, so decrementing it below is safe.
    for _ in range(low):
        if high % low == 0:
            matches += 1
        high -= 1
        low -= 1
    return matches
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
3fc2dbb3e16a2861f18b339ee15022a85d67550f
|
6b7bdeff133c37245698e450a01f75bb6a97c2a4
|
/28.colordialog.py
|
4a47f37fbfa2e3611337d803621896fb32a462cf
|
[] |
no_license
|
HardPlant/PyQT
|
9bb4f8e53417ede638bddd63571ade506785d2fb
|
fae0315620b3005ead451a170214c1334bac7fab
|
refs/heads/master
| 2020-03-24T23:49:47.248763
| 2018-08-05T07:59:17
| 2018-08-05T07:59:17
| 143,155,998
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,104
|
py
|
from PyQt5.QtWidgets import (QWidget, QPushButton, QFrame,
QColorDialog, QApplication)
from PyQt5.QtGui import QColor
import sys
class Example(QWidget):
    """Demo window: a button opens a QColorDialog and a frame is repainted
    with the chosen colour."""

    def __init__(self):
        super().__init__()
        self.initUI()

    def initUI(self):
        """Build the button + colour frame and show the window."""
        # Initial frame colour is black.
        col = QColor(0, 0, 0)
        self.btn = QPushButton('Dialog', self)
        self.btn.move(20, 20)
        self.btn.clicked.connect(self.showDialog)
        self.frm = QFrame(self)
        self.frm.setStyleSheet("QWidget { background-color: %s }"
            % col.name())
        self.frm.setGeometry(130, 22, 100, 100)
        self.setGeometry(300, 300, 250, 180)
        self.setWindowTitle('Color dialog')
        self.show()

    def showDialog(self):
        """Open the modal colour picker; recolour the frame on accept."""
        col = QColorDialog.getColor()
        # isValid() is False when the user cancels the dialog.
        if col.isValid():
            self.frm.setStyleSheet("QWidget { background-color: %s }"
                % col.name())
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_())
|
[
"abc7988se@naver.com"
] |
abc7988se@naver.com
|
c9f7d93330c888d19c537cb39e06fdd586f5682a
|
3411ad233c411c06765f4b07f8670c12025178b6
|
/LCOF/31-40/32-1/32-1.py
|
478ca7b4eff8a95655fa61b25fb586420bfccdd7
|
[
"MIT"
] |
permissive
|
xuychen/Leetcode
|
7d9d31fed898ce58440f5ae6665d2ccaf1a4b256
|
c8bf33af30569177c5276ffcd72a8d93ba4c402a
|
refs/heads/master
| 2021-11-19T20:39:43.741589
| 2021-10-24T16:26:52
| 2021-10-24T16:26:52
| 140,212,398
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 712
|
py
|
import Queue
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def levelOrder(self, root):
        """Return node values of a binary tree in breadth-first (level) order.

        :type root: TreeNode (has .val, .left, .right) or None
        :rtype: List[int]

        Fix: the original used ``Queue.Queue``, which is Python-2-only
        (the module is ``queue`` in Python 3) and pays thread-locking
        overhead for a single-threaded traversal; ``collections.deque``
        gives O(1) append/popleft and works on both versions.
        """
        if not root:
            return []
        from collections import deque
        pending = deque([root])
        result = []
        while pending:
            node = pending.popleft()
            result.append(node.val)
            if node.left:
                pending.append(node.left)
            if node.right:
                pending.append(node.right)
        return result
|
[
"xuychen@ucdavis.edu"
] |
xuychen@ucdavis.edu
|
4e1cd719cfa46c689fd0b63ee03bf2f1c80310ca
|
75a179e8ddba54442697de87a3846f1711a30bae
|
/crudoperation/crudoperation/wsgi.py
|
5d9ef489b1f5198e5c28ae26457fc488c0ac661e
|
[] |
no_license
|
amanlalwani007/drftutorial
|
2b5a5338b3146b1feb88c4d815fbf996dd49cb9d
|
4f5c651f4dee98a359b7a6e34d0ae9a8f8630e68
|
refs/heads/master
| 2023-07-09T01:28:04.921042
| 2021-08-21T10:59:06
| 2021-08-21T10:59:06
| 392,457,445
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 403
|
py
|
"""
WSGI config for crudoperation project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'crudoperation.settings')
application = get_wsgi_application()
|
[
"amanlalwani0807@gmail.com"
] |
amanlalwani0807@gmail.com
|
2aeb0cb802cd448cc40eb31633927bc1dec209e9
|
6288f56016e0a1dc9e4a4eceb18425d4ae27f4a3
|
/tests/test_shared_bandwidth_switch.py
|
a56dcf16eb26e6f748983a354187ef70850c7256
|
[] |
no_license
|
brettviren/ersatz
|
ec547fe347bfcc9ccafce7b159cc6378f3f4ab7d
|
187df8096d5dadae2a115a7796cafd6d59c4f6af
|
refs/heads/master
| 2021-01-17T22:14:06.475993
| 2018-01-21T18:31:31
| 2018-01-21T18:31:31
| 60,030,033
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,263
|
py
|
#!/usr/bin/env python3
import simpy
from ersatz.switch import Switch, LinkedSwitch
from ersatz.datum import Datum
from ersatz.monitoring import trace
def test_balance():
    """Stage a banded crossbar of datums on a Switch, run balance(), and
    dump the result plus a Graphviz rendering to test_balance.dot."""
    # env=None: balance() is exercised without a running simulation.
    sw = Switch(None, 100)
    for ip in range(5):
        # Each input port targets a band of nearby output ports.
        for op in range(max(0,ip-2),min(5,ip+2)):
            sw.stage(Datum(ip, op, 10, None))
    # NOTE(review): assumes balance() returns (max adjustment, iteration
    # count) — confirm against ersatz.switch.
    imb,count = sw.balance()
    print ('%d iters: max adj: %.1f' % (count, imb))
    sw.dump()
    with open('test_balance.dot','w') as fp:
        fp.write(sw.dot())
def slurp(env, outbox):
    """SimPy process: endlessly drain *outbox*, printing each datum received
    with its arrival time and tx/rx addresses."""
    while True:
        datum = yield outbox.get()
        print ('final: %s' % datum)
        print ('\tDONE: at %.1f, %s' % \
            (env.now, datum.payload))
        print('\t : %s -> %s' % (datum.txaddr, datum.rxaddr))
def add_stream(env, inbox, wait, txaddr, rxaddr, size, payload):
    """SimPy process: after *wait* simulated time units, put one Datum of
    *size* carrying *payload* from *txaddr* to *rxaddr* into *inbox*."""
    print ('waiting %f to start stream of "%s"' % (wait, payload))
    yield env.timeout(wait)
    print ('starting stream with payload "%s"' % payload)
    yield inbox.put(Datum(txaddr, rxaddr, size, payload))
class FillData(object):
    """Callable trace sink: records every monitoring callback invocation
    as a (time, priority, event-id, event) tuple in ``self.data``."""

    def __init__(self):
        self.data = []

    def __call__(self, t, prio, eid, event):
        self.data.append((t, prio, eid, event))
def test_switching():
    """Run three staggered streams through a bandwidth-10 Switch for 200
    time units, with a FillData sink attached as the trace monitor."""
    env = simpy.Environment()
    monitor = FillData()
    trace(env, monitor)
    sw = Switch(env, 10)
    env.process(slurp(env, sw.outbox))
    env.process(add_stream(env, sw.inbox, 0, 'p1', 'p2', 50, "datum1"))
    env.process(add_stream(env, sw.inbox, 2, 'p1', 'p3', 50, "datum2"))
    env.process(add_stream(env, sw.inbox, 4, 'p2', 'p3', 50, "datum3"))
    env.run(until=200)
    # for item in monitor.data:
    #     print (str(item))
def test_linked():
    """Link two NIC endpoints through a LinkedSwitch and push one 50-unit
    payload from nic1 to nic2, draining the receive side with slurp()."""
    env = simpy.Environment()
    nic1_addr = 'nic1'
    nic1_tx = simpy.Store(env)  # nic1 only transmits
    nic2_addr = 'nic2'
    nic2_rx = simpy.Store(env)  # nic2 only receives
    env.process(add_stream(env, nic1_tx, 2, nic1_addr, nic2_addr, 50, "hello world"))
    env.process(slurp(env, nic2_rx))
    lsw = LinkedSwitch(env, 10)
    print ("Linking nic1")
    # NOTE(review): link_nic signature appears to be (addr, rx, tx) —
    # confirm against ersatz.switch.LinkedSwitch.
    lsw.link_nic(nic1_addr, None, nic1_tx)
    print ("Linking nic2")
    lsw.link_nic(nic2_addr, nic2_rx, None)
    print ("running")
    env.run(until=200)
if '__main__' == __name__:
test_balance()
test_switching()
test_linked()
|
[
"brett.viren@gmail.com"
] |
brett.viren@gmail.com
|
918a5618d56e51317a4a67dafb2975bf2f9b5e58
|
15531adf1e9b66db6259a52d16d0b0ddd672f4e9
|
/backend-project/small_eod/institutions/migrations/0009_auto_20200618_2320.py
|
f79ca4d496e41841d8d2f2d73ddddb1de08763ad
|
[
"MIT"
] |
permissive
|
watchdogpolska/small_eod
|
2c81b0bb67113a4fd9f5c9c0f8b07daa2399e243
|
509f532e10c20e700cb9b37c73f4a695b53b42a8
|
refs/heads/dev
| 2023-09-01T01:30:44.681105
| 2022-03-31T19:36:39
| 2022-03-31T22:20:42
| 136,141,184
| 66
| 75
|
MIT
| 2023-09-04T14:33:37
| 2018-06-05T07:56:33
|
Python
|
UTF-8
|
Python
| false
| false
| 739
|
py
|
# Generated by Django 3.0.7 on 2020-06-18 23:20
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add optional ``comment`` and ``tags``
    fields to the Institution model."""

    dependencies = [
        ('tags', '0005_merge_multiple_leaf_nodes'),
        ('institutions', '0008_auto_20200528_1330'),
    ]

    operations = [
        migrations.AddField(
            model_name='institution',
            name='comment',
            field=models.CharField(blank=True, help_text='Comment for this case.', max_length=256, verbose_name='Comment'),
        ),
        migrations.AddField(
            model_name='institution',
            name='tags',
            field=models.ManyToManyField(blank=True, help_text='Choose tags.', to='tags.Tag', verbose_name='Tags'),
        ),
    ]
|
[
"noreply@github.com"
] |
watchdogpolska.noreply@github.com
|
f761f157069a51fef2dade3ebdadd11611dbe111
|
a18fba3eeb79b51faa55fcb1c45804ae57bb0aa4
|
/wall_erd/Wall/settings.py
|
7b1d1b9a14c56ad892d1bab1b6d1ec353c721c7b
|
[] |
no_license
|
Catrinici/Python_Django
|
a850f732734f799c923035d4b28a9553e288194f
|
9415b40a49fc9aa0a685937a62e259bd8103e208
|
refs/heads/master
| 2023-04-28T13:22:16.987969
| 2019-10-15T15:41:38
| 2019-10-15T15:41:38
| 206,885,302
| 1
| 0
| null | 2023-04-21T20:37:48
| 2019-09-06T22:52:27
|
Python
|
UTF-8
|
Python
| false
| false
| 3,111
|
py
|
"""
Django settings for Wall project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'gi=3du&z-q8>8tc%z$x7qj#%zl=cm8xaya0mty3fxav3w#m2'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'apps.wall_app',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Wall.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Wall.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
|
[
"catrinici.antonina@gmail.com"
] |
catrinici.antonina@gmail.com
|
6b00216e57dd1ea5b6a02c4851dc0365d2342a92
|
af8dfddd4ba4f03560f2f6930f88c4d8a0a8e8cf
|
/conf.py
|
6fe28580ca8c11405a26bca8d52a206bd6a6746f
|
[] |
no_license
|
Parham-Baghbanbashi/team-manual
|
155e243d8152c1c63214db8a698881d08b4765b3
|
9dbd11a2e340f6ce4ffe17dfe0bb8b9619ece914
|
refs/heads/master
| 2023-01-19T20:23:40.094323
| 2020-12-02T14:14:09
| 2020-12-02T14:14:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,929
|
py
|
import sphinx_rtd_theme
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'Rams Robotics Manual'
copyright = '2020, FTC Team 16488'
author = 'FTC Team 16488'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx_rtd_theme"
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
|
[
"daniel.gallo@ycdsbk12.ca"
] |
daniel.gallo@ycdsbk12.ca
|
d68496f8b528022a5ce09200a302dc3c2569c5b0
|
2293c76c3d18e2fcd44ded90bd40113d26285663
|
/pyeccodes/defs/grib2/tables/25/4_244_table.py
|
a1c940653667995441617ed12e107d0f12316077
|
[
"Apache-2.0"
] |
permissive
|
ecmwf/pyeccodes
|
b1f121dbddf68d176a03805ed5144ba0b37ac211
|
dce2c72d3adcc0cb801731366be53327ce13a00b
|
refs/heads/master
| 2022-04-23T10:37:40.524078
| 2020-04-18T06:30:29
| 2020-04-18T06:30:29
| 255,554,540
| 9
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 265
|
py
|
def load(h):
    """Return the GRIB2 code-table entries as a tuple of dicts, each with
    'abbr', 'code' and 'title' keys (code 255 marks the missing value)."""
    rows = (
        (0, 0, 'No quality information available'),
        (1, 1, 'Failed'),
        (2, 2, 'Passed'),
        (None, 255, 'Missing'),
    )
    return tuple({'abbr': abbr, 'code': code, 'title': title}
                 for abbr, code, title in rows)
|
[
"baudouin.raoult@ecmwf.int"
] |
baudouin.raoult@ecmwf.int
|
7139040a4e2346cd3caee6871414bf5ab8b75d35
|
a861a32ffdb368a721ff9e9006268f3dffbecc71
|
/app/firestore_service.py
|
4f3db3becefbccfafb97fa57a3a2f5575971f14a
|
[
"MIT"
] |
permissive
|
agomusa/flask-taskapp
|
039450080f551ff4ba9a7796835543290f8583ce
|
3c456b0dcfe8c30be44954b6c280d843653b8a3e
|
refs/heads/main
| 2023-05-30T17:16:57.256763
| 2021-06-14T20:04:22
| 2021-06-14T20:04:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,188
|
py
|
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
credential = credentials.ApplicationDefault()
firebase_admin.initialize_app(credential)
db = firestore.client()
def get_users():
    """Return all documents in the top-level "users" collection."""
    return db.collection("users").get()


def get_user(user_id):
    """Fetch a single user document by id."""
    return db.collection("users").document(user_id).get()


def user_put(user_data):
    """Create/overwrite the user document keyed by username.

    NOTE(review): stores the password as-is in Firestore — looks like
    plaintext; confirm hashing happens in the caller.
    """
    user_ref = db.collection("users").document(user_data.username)
    user_ref.set({"password": user_data.password})


def get_todos(user_id):
    """Return all todo documents in the user's "todos" subcollection."""
    return db.collection("users").document(user_id).collection("todos").get()


def put_todo(user_id, description):
    """Add a new todo (auto-generated id) for *user_id*, initially not done."""
    todos_collection_ref = db.collection(
        "users").document(user_id).collection("todos")
    todos_collection_ref.add({"description": description, "done": False})


def delete_todo(user_id, todo_id):
    """Delete one todo document."""
    todo_ref = _get_todo_ref(user_id, todo_id)
    todo_ref.delete()


def update_todo(user_id, todo_id, done):
    """Toggle a todo's completion: stores the negation of *done*."""
    todo_done = not bool(done)
    todo_ref = _get_todo_ref(user_id, todo_id)
    todo_ref.update({"done": todo_done})


def _get_todo_ref(user_id, todo_id):
    """Build the document reference for users/<user_id>/todos/<todo_id>."""
    return db.document("users/{}/todos/{}".format(user_id, todo_id))
|
[
"eliaz.bobadilladev@gmail.com"
] |
eliaz.bobadilladev@gmail.com
|
97da63adc46cd749c1cb8b35727cfa54ba4e723b
|
b0cdbad299f6174bfdb0fba173dbcf3889b82209
|
/Modules/os/gettextfiles.py
|
198d01bcec7b9bcc12a7a7c25a9bdee70cf4e946
|
[] |
no_license
|
deesaw/PythonD-06
|
a33e676f1e0cfc13b4ea645c8b60547b198239ac
|
3c6f065d7be2e3e10cafb6cef79d6cae9d55a7fa
|
refs/heads/master
| 2023-03-18T08:24:42.030935
| 2021-03-02T14:15:09
| 2021-03-02T14:15:09
| 343,797,605
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 150
|
py
|
import os

# Recursively walk the C: drive and print the full path of every .txt file.
# Fix: the original used the Python-2-only "print expr" statement, which is
# a SyntaxError on Python 3; the print(...) call form works on both 2 and 3.
for (dirname, subdir, files) in os.walk('c:\\'):
    for myfile in files:
        if (myfile.endswith('.txt')):
            print(os.path.join(dirname, myfile))
|
[
"69420960+deesaw@users.noreply.github.com"
] |
69420960+deesaw@users.noreply.github.com
|
6ca4acfa15bb6d2e30f65bb4f97c459c35411d48
|
8a1987a6c762d1440f7ce1b60e6dbb02491db9f1
|
/huntserver/migrations/0022_switch_to_utf8mb4_columns.py
|
aa1812f0fc8a5510d0598d437bea1ade6056f017
|
[
"MIT"
] |
permissive
|
dlareau/puzzlehunt_server
|
fd9807f074cbdc95ad46730e931da86a54b78f45
|
44f87cc5cfe8bb23a8e04fddee187b9056407741
|
refs/heads/master
| 2022-12-27T18:48:43.883587
| 2021-08-17T23:55:36
| 2021-08-17T23:55:36
| 37,299,424
| 20
| 23
|
MIT
| 2022-05-22T00:16:21
| 2015-06-12T03:12:51
|
Python
|
UTF-8
|
Python
| false
| false
| 6,041
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import sys
fwd_operations = [
'ALTER TABLE `auth_user` MODIFY `password` varchar(128) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `auth_user` MODIFY `username` varchar(30) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `auth_user` MODIFY `first_name` varchar(30) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `auth_user` MODIFY `last_name` varchar(30) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `auth_user` MODIFY `email` varchar(254) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `django_admin_log` MODIFY `object_id` longtext CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `django_admin_log` MODIFY `object_repr` varchar(200) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `django_admin_log` MODIFY `change_message` longtext CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_hunt` MODIFY `hunt_name` varchar(200) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_hunt` MODIFY `location` varchar(100) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_hunt` MODIFY `template` longtext CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_huntassetfile` MODIFY `file` varchar(100) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_message` MODIFY `text` varchar(400) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_person` MODIFY `phone` varchar(20) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_person` MODIFY `comments` varchar(400) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_person` MODIFY `allergies` varchar(400) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_puzzle` MODIFY `puzzle_name` varchar(200) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_puzzle` MODIFY `puzzle_id` varchar(8) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_puzzle` MODIFY `answer` varchar(100) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_puzzle` MODIFY `link` varchar(200) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_response` MODIFY `regex` varchar(400) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_response` MODIFY `text` varchar(400) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_submission` MODIFY `submission_text` varchar(100) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_submission` MODIFY `response_text` varchar(400) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_team` MODIFY `team_name` varchar(200) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_team` MODIFY `location` varchar(80) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_team` MODIFY `join_code` varchar(5) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_unlockable` MODIFY `content_type` varchar(3) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_unlockable` MODIFY `content` varchar(500) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
]
reverse_operations = [
'ALTER TABLE `auth_user` MODIFY `password` varchar(128) ',
'ALTER TABLE `auth_user` MODIFY `username` varchar(30) ',
'ALTER TABLE `auth_user` MODIFY `first_name` varchar(30) ',
'ALTER TABLE `auth_user` MODIFY `last_name` varchar(30) ',
'ALTER TABLE `auth_user` MODIFY `email` varchar(254) ',
'ALTER TABLE `django_admin_log` MODIFY `object_id` longtext ',
'ALTER TABLE `django_admin_log` MODIFY `object_repr` varchar(200) ',
'ALTER TABLE `django_admin_log` MODIFY `change_message` longtext ',
'ALTER TABLE `huntserver_hunt` MODIFY `hunt_name` varchar(200) ',
'ALTER TABLE `huntserver_hunt` MODIFY `location` varchar(100) ',
'ALTER TABLE `huntserver_hunt` MODIFY `template` longtext ',
'ALTER TABLE `huntserver_huntassetfile` MODIFY `file` varchar(100) ',
'ALTER TABLE `huntserver_message` MODIFY `text` varchar(400) ',
'ALTER TABLE `huntserver_person` MODIFY `phone` varchar(20) ',
'ALTER TABLE `huntserver_person` MODIFY `comments` varchar(400) ',
'ALTER TABLE `huntserver_person` MODIFY `allergies` varchar(400) ',
'ALTER TABLE `huntserver_puzzle` MODIFY `puzzle_name` varchar(200) ',
'ALTER TABLE `huntserver_puzzle` MODIFY `puzzle_id` varchar(8) ',
'ALTER TABLE `huntserver_puzzle` MODIFY `answer` varchar(100) ',
'ALTER TABLE `huntserver_puzzle` MODIFY `link` varchar(200) ',
'ALTER TABLE `huntserver_response` MODIFY `regex` varchar(400) ',
'ALTER TABLE `huntserver_response` MODIFY `text` varchar(400) ',
'ALTER TABLE `huntserver_submission` MODIFY `submission_text` varchar(100) ',
'ALTER TABLE `huntserver_submission` MODIFY `response_text` varchar(400) ',
'ALTER TABLE `huntserver_team` MODIFY `team_name` varchar(200) ',
'ALTER TABLE `huntserver_team` MODIFY `location` varchar(80) ',
'ALTER TABLE `huntserver_team` MODIFY `join_code` varchar(5) ',
'ALTER TABLE `huntserver_unlockable` MODIFY `content_type` varchar(3) ',
'ALTER TABLE `huntserver_unlockable` MODIFY `content` varchar(500) ',
]
def forwards(apps, schema_editor):
    """Apply the utf8mb4 column conversions; no-op on non-MySQL backends."""
    # Raw ALTER TABLE statements only make sense on MySQL.
    if not schema_editor.connection.vendor.startswith('mysql'):
        return
    for command in fwd_operations:
        schema_editor.execute(command)
def backwards(apps, schema_editor):
    """Revert the columns to their pre-utf8mb4 definitions; no-op on
    non-MySQL backends.

    Fix: this previously iterated ``fwd_operations`` (copy-paste bug), so
    reversing the migration re-applied the utf8mb4 conversion instead of
    undoing it — the ``reverse_operations`` list was defined but never
    used.
    """
    if not schema_editor.connection.vendor.startswith('mysql'):
        return
    for command in reverse_operations:
        schema_editor.execute(command)
class Migration(migrations.Migration):
dependencies = [
('huntserver', '0021_auto_20180402_2224'),
]
operations = [
migrations.RunPython(forwards, backwards, atomic=False)
]
|
[
"flybye22@gmail.com"
] |
flybye22@gmail.com
|
4f1f35cd3e6246382f9d1003ac1d2188b27d3137
|
14a913fce4b538b22f28409645cd6abe3455808f
|
/bigquery_storage/to_dataframe/main_test.py
|
8335b437063c827cd6d43c4af690752455ae19dd
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
iamLoi/Python-Random-Number-Generator
|
8da7dbd37cb13a01232c8ed49b9df35a99c63d73
|
7579e8b15130802aaf519979e475c6c75c403eda
|
refs/heads/master
| 2022-08-29T19:05:32.649931
| 2019-09-14T14:48:58
| 2019-09-14T14:48:58
| 208,454,877
| 2
| 1
|
Apache-2.0
| 2022-08-05T21:57:49
| 2019-09-14T14:51:05
|
Python
|
UTF-8
|
Python
| false
| false
| 5,502
|
py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
@pytest.fixture
def clients():
# [START bigquerystorage_pandas_tutorial_all]
# [START bigquerystorage_pandas_tutorial_create_client]
import google.auth
from google.cloud import bigquery
from google.cloud import bigquery_storage_v1beta1
# Explicitly create a credentials object. This allows you to use the same
# credentials for both the BigQuery and BigQuery Storage clients, avoiding
# unnecessary API calls to fetch duplicate authentication tokens.
credentials, your_project_id = google.auth.default(
scopes=["https://www.googleapis.com/auth/cloud-platform"]
)
# Make clients.
bqclient = bigquery.Client(
credentials=credentials,
project=your_project_id,
)
bqstorageclient = bigquery_storage_v1beta1.BigQueryStorageClient(
credentials=credentials
)
# [END bigquerystorage_pandas_tutorial_create_client]
# [END bigquerystorage_pandas_tutorial_all]
return bqclient, bqstorageclient
def test_table_to_dataframe(capsys, clients):
from google.cloud import bigquery
bqclient, bqstorageclient = clients
# [START bigquerystorage_pandas_tutorial_all]
# [START bigquerystorage_pandas_tutorial_read_table]
# Download a table.
table = bigquery.TableReference.from_string(
"bigquery-public-data.utility_us.country_code_iso"
)
rows = bqclient.list_rows(
table,
selected_fields=[
bigquery.SchemaField("country_name", "STRING"),
bigquery.SchemaField("fips_code", "STRING"),
],
)
dataframe = rows.to_dataframe(bqstorage_client=bqstorageclient)
print(dataframe.head())
# [END bigquerystorage_pandas_tutorial_read_table]
# [END bigquerystorage_pandas_tutorial_all]
out, _ = capsys.readouterr()
assert "country_name" in out
def test_query_to_dataframe(capsys, clients):
bqclient, bqstorageclient = clients
# [START bigquerystorage_pandas_tutorial_all]
# [START bigquerystorage_pandas_tutorial_read_query_results]
# Download query results.
query_string = """
SELECT
CONCAT(
'https://stackoverflow.com/questions/',
CAST(id as STRING)) as url,
view_count
FROM `bigquery-public-data.stackoverflow.posts_questions`
WHERE tags like '%google-bigquery%'
ORDER BY view_count DESC
"""
dataframe = (
bqclient.query(query_string)
.result()
.to_dataframe(bqstorage_client=bqstorageclient)
)
print(dataframe.head())
# [END bigquerystorage_pandas_tutorial_read_query_results]
# [END bigquerystorage_pandas_tutorial_all]
out, _ = capsys.readouterr()
assert "stackoverflow" in out
def test_session_to_dataframe(capsys, clients):
from google.cloud import bigquery_storage_v1beta1
bqclient, bqstorageclient = clients
your_project_id = bqclient.project
# [START bigquerystorage_pandas_tutorial_all]
# [START bigquerystorage_pandas_tutorial_read_session]
table = bigquery_storage_v1beta1.types.TableReference()
table.project_id = "bigquery-public-data"
table.dataset_id = "new_york_trees"
table.table_id = "tree_species"
# Select columns to read with read options. If no read options are
# specified, the whole table is read.
read_options = bigquery_storage_v1beta1.types.TableReadOptions()
read_options.selected_fields.append("species_common_name")
read_options.selected_fields.append("fall_color")
parent = "projects/{}".format(your_project_id)
session = bqstorageclient.create_read_session(
table,
parent,
read_options=read_options,
# This API can also deliver data serialized in Apache Avro format.
# This example leverages Apache Arrow.
format_=bigquery_storage_v1beta1.enums.DataFormat.ARROW,
# We use a LIQUID strategy in this example because we only read from a
# single stream. Consider BALANCED if you're consuming multiple streams
# concurrently and want more consistent stream sizes.
sharding_strategy=(
bigquery_storage_v1beta1.enums.ShardingStrategy.LIQUID
),
)
# This example reads from only a single stream. Read from multiple streams
# to fetch data faster. Note that the session may not contain any streams
# if there are no rows to read.
stream = session.streams[0]
position = bigquery_storage_v1beta1.types.StreamPosition(stream=stream)
reader = bqstorageclient.read_rows(position)
# Parse all Avro blocks and create a dataframe. This call requires a
# session, because the session contains the schema for the row blocks.
dataframe = reader.to_dataframe(session)
print(dataframe.head())
# [END bigquerystorage_pandas_tutorial_read_session]
# [END bigquerystorage_pandas_tutorial_all]
out, _ = capsys.readouterr()
assert "species_common_name" in out
|
[
"noreply@github.com"
] |
iamLoi.noreply@github.com
|
e6313fe1285bba0a56b05db426f9c6c6861bde1e
|
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
|
/cases/pa3/sample/op_cmp_bool-94.py
|
6c2f718b85e4de8c6dbed4cf3746b6a697bd5dc4
|
[] |
no_license
|
Virtlink/ccbench-chocopy
|
c3f7f6af6349aff6503196f727ef89f210a1eac8
|
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
|
refs/heads/main
| 2023-04-07T15:07:12.464038
| 2022-02-03T15:42:39
| 2022-02-03T15:42:39
| 451,969,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 158
|
py
|
print(True == True)
print(True == False)
print(False == True)
print(False == False)
print(True != True)
print(True != False)
print(False != True)
print($Exp)
|
[
"647530+Virtlink@users.noreply.github.com"
] |
647530+Virtlink@users.noreply.github.com
|
369376d38046b8a203a95ecd37ed2f1dfe746ebb
|
c585583d366924d8977462035b631161094241a9
|
/redbot/message/headers/_header.tpl
|
31561e68d04690bc85b5ca29ebfd1dc659b8138a
|
[
"MIT"
] |
permissive
|
QPC-database/redbot
|
a289248bbb24f8cc378001e38cb633e6f1aff098
|
f05dd7754cd6f6ba005ae44beeb8ed21516a93c8
|
refs/heads/main
| 2023-05-07T00:13:59.733511
| 2021-05-28T02:42:02
| 2021-05-28T02:42:02
| 382,878,629
| 1
| 0
|
MIT
| 2021-07-04T14:57:42
| 2021-07-04T14:57:42
| null |
UTF-8
|
Python
| false
| false
| 875
|
tpl
|
#!/usr/bin/env python
from redbot.message import headers
from redbot.speak import Note, categories, levels
from redbot.type import AddNoteMethodType
class SHORT_NAME(headers.HttpHeader):
canonical_name = "SHORT_NAME"
description = """\
FIXME
"""
reference = None
syntax = False
list_header = False
deprecated = False
valid_in_requests = False
valid_in_responses = True
def parse(self, field_value: str, add_note: AddNoteMethodType) -> ...:
return field_value
def evaluate(self, add_note: AddNoteMethodType) -> None:
return
class SHORT_NAME_NOTE(Note):
category = categories.GENERAL
level = levels.INFO
summary = "FIXME"
text = """\
FIXME"""
class SHORT_NAMETest(headers.HeaderTest):
name = 'SHORT_NAME'
inputs = ['FIXME']
expected_out = ('FIXME')
expected_err = [] # type: ignore
|
[
"mnot@mnot.net"
] |
mnot@mnot.net
|
cf363c988b6badef51b74389c99ca6acff643e5a
|
97543ae8e1ad7bf3d17dd87171aaac04f6737b5f
|
/bibliopixel/drivers/ledtype.py
|
b1a962f06ec533a9bbfeac352c4d4ccbe0cf78b5
|
[
"MIT"
] |
permissive
|
dr-aryone/BiblioPixel
|
a3c630bf1cd5db2b014b86775d283c61565a193e
|
fd97e6c651a4bbcade64733847f4eec8f7704b7c
|
refs/heads/master
| 2020-05-27T16:19:15.043592
| 2019-03-23T08:52:37
| 2019-03-25T11:10:39
| 188,698,414
| 2
| 1
|
MIT
| 2019-05-26T15:12:38
| 2019-05-26T15:12:37
| null |
UTF-8
|
Python
| false
| false
| 588
|
py
|
from enum import IntEnum
class LEDTYPE(IntEnum):
    """Enumerated LED type names to be used with
    :py:mod:`bibliopixel.drivers.serial` and
    :py:mod:`bibliopixel.drivers.SPI`
    """
    GENERIC = 0  # Use if the serial device only supports one chipset
    LPD8806 = 1
    WS2801 = 2
    # These are all the same: the repeated value 3 makes the later names
    # enum *aliases* of WS2811 (IntEnum permits duplicate values).
    WS2811 = 3
    WS2812 = 3
    WS2812B = 3
    NEOPIXEL = 3
    APA104 = 3
    # 400khz variant of above
    WS2811_400 = 4
    TM1809 = 5
    TM1804 = 5  # alias of TM1809
    TM1803 = 6
    UCS1903 = 7
    SM16716 = 8
    APA102 = 9
    SK9822 = 9  # alias of APA102
    LPD1886 = 10
    P9813 = 11
|
[
"adammhaile@gmail.com"
] |
adammhaile@gmail.com
|
edd2690a5b80bee5f27437cef21e1a4995e9a870
|
9a4755588bbe924270e0d92e04d3409281fbaf5b
|
/main/displayer.py
|
a0d3b2201298b0f2f77b759cc72098e188f45c3e
|
[] |
no_license
|
chaobai-li/authenticated-info-displayer
|
209488a8229d17b9d67371435e4aa576ef0bb0b3
|
c19c6d477a3b96cda3d65f1833d28ade07aff7ba
|
refs/heads/master
| 2021-01-25T11:48:49.936003
| 2018-03-03T11:59:48
| 2018-03-03T11:59:48
| 123,431,569
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,494
|
py
|
__pragma__("alias", "S", "$")  # Transcrypt directive: S(...) compiles to jQuery's $(...)
class Displayer:
    """Mirrors values from a Firebase Realtime Database into DOM nodes.

    Browser-side code (compiled with Transcrypt); `firebase` and jQuery are
    assumed to be page globals.  Elements opt in via data attributes:
      data-auth-display           -> database path to watch
      data-auth-display-template  -> str.format template for the value
      data-auth-display-attribute -> set this attribute instead of content
      data-auth-display-html      -> insert as HTML rather than text
      data-auth-display-toggle    -> shown on login, hidden on logout
    """
    def __init__(self, authenticator):
        self.authenticator = authenticator
        # Wire DOM updates to the login/logout lifecycle.
        self.authenticator.eventLogin.append(self.__initialize)
        self.authenticator.eventLogin.append(lambda: self.__toggle(True))
        self.authenticator.eventLogout.append(lambda: self.__toggle(False))
        self.initialized = False
    def __toggle(self, v):
        # Show (v=True) or hide (v=False) every toggle-marked element.
        S('[data-auth-display-toggle]').toggle(v)
    def __initialize(self):
        # One-time page scan; guarded so repeated login events are no-ops.
        if self.initialized: return
        self.database = firebase.database()
        interests = list(S('[data-auth-display]'))
        for each in interests:
            path = S(each).attr("data-auth-display")
            template = S(each).attr("data-auth-display-template")
            targetAttr = S(each).attr("data-auth-display-attribute")
            useHtml = S(each).attr("data-auth-display-html")
            self.__bindListener(each, path, template, targetAttr, useHtml)
        self.initialized = True
    def __bindListener(self, domObj, path, template, targetAttr, useHtml):
        # Subscribe `domObj` to value changes at the given database path.
        if not template:
            template = "{}"
        def updater(dbValue):
            # Render the new value and write it into the DOM node.
            text = template.format(dbValue.val())
            if targetAttr:
                S(domObj).attr(targetAttr, text)
            else:
                if useHtml:
                    S(domObj).html(text)
                else:
                    S(domObj).text(text)
        self.database.ref(path).on("value", updater)
|
[
"contact@chaobai.li"
] |
contact@chaobai.li
|
78ac77bbaba347ba0643688428339f03ef0ddee3
|
02b04b202550248a2b78ed069d94b7607640c866
|
/DataTypes/Numbers/max.py
|
5c3f7d5ee91951b547605f887089c82a0ca3b66a
|
[] |
no_license
|
keshavkummari/python-nit-7am
|
c391fe96783c224b44419a258738168230e182cd
|
0bc867ad673e40ad401d7473aab4791f21ee1945
|
refs/heads/master
| 2020-03-30T15:05:18.376222
| 2018-11-05T02:30:44
| 2018-11-05T02:30:44
| 151,347,860
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 793
|
py
|
#!/usr/bin/python
"""9. max(x1, x2,...) : The largest of its arguments: the value closest to positive infinity
Python Number max() Method:"""
# Fix: converted Python 2 print statements to Python 3 print() calls.
# Output is unchanged: printing multiple arguments is space-separated
# in both forms.
print("max(80, 100, 1000) : ", max(80, 100, 1000))
print("max(-20, 100, 400) : ", max(-20, 100, 400))
print("max(-80, -20, -10) : ", max(-80, -20, -10))
print("max(0, 100, -400) : ", max(0, 100, -400))
# max() Method; min() Method
print("max method", max(30, -30, 40, 50))
print("min method", min(30, -30, 40, 50))
# (second shebang of the original concatenated file kept as a comment)
# 10. min(x1, x2,...): The smallest of its arguments: the value closest to negative infinity
# Python Number min() Method:
print("min(-20, 100, 400) : ", min(-20, 100, 400))
print("min(80, 100, 1000) : ", min(80, 100, 1000))
print("min(-80, -20, -10) : ", min(-80, -20, -10))
print("min(0, 100, -400) : ", min(0, 100, -400))
|
[
"keshav.kummari@gmail.com"
] |
keshav.kummari@gmail.com
|
959b36ffc39fe17b4ec4cb1d925ad67bca447215
|
0452408a98e03408508b4889ed68a8d0f2d9f8cf
|
/alphatwirl/roottree/Events.py
|
dcd99a451aa2f0d07fe62f0b79f023eb3c2325ed
|
[
"BSD-3-Clause"
] |
permissive
|
benkrikler/alphatwirl
|
3318e79b89ce0e79c4a4399c7a40c789531f0e60
|
cda7d12fec21291ea33af23234fc08be19430934
|
refs/heads/master
| 2021-01-23T12:54:05.101466
| 2018-09-26T13:13:18
| 2018-09-26T13:13:18
| 93,210,643
| 0
| 0
|
BSD-3-Clause
| 2018-03-19T12:27:16
| 2017-06-02T23:18:59
|
Python
|
UTF-8
|
Python
| false
| false
| 2,629
|
py
|
# Tai Sakuma <tai.sakuma@gmail.com>
##__________________________________________________________________||
class Events(object):
    """An iterative object for events.
    You can use this class to iterate over entries in a ROOT TTree.
    You can instantiate this class with a TTree object and an
    optionally a maximum number of entries to loop over::
        inputFile = ROOT.TFile.Open(inputPath)
        tree = inputFile.Get(treeName)
        events = Events(tree)
    Then, the "for" loop for the tree entries can be::
        for event in events:
    Note: "event" and "events" are the same object. In each iteration,
    "event" (and "events") is loaded with the next entry in the tree.
    A content of the tree, e.g., a branch, can be accessed as an
    attribute of "event"::
        event.jet_pt
    In order to access to a particular entry, you can use an index.
    For example, to get 11th entry (the index for the first entry is
    0)::
        event = events[10]
    Note: Again "event" and "events" are the same object.
    """
    def __init__(self, tree, maxEvents=-1, start=0):
        # tree: the ROOT TTree to wrap; maxEvents: cap on entries to expose
        # (-1 means "all remaining"); start: first entry index, clamped to
        # the tree size.
        if start < 0:
            raise ValueError("start must be greater than or equal to zero: {} is given".format(start))
        self.tree = tree
        nEventsInTree = self.tree.GetEntries()
        start = min(nEventsInTree, start)
        if maxEvents > -1:
            self.nEvents = min(nEventsInTree - start, maxEvents)
        else:
            self.nEvents = nEventsInTree - start
        self.maxEvents = maxEvents
        self.start = start
        self.iEvent = -1  # -1 means "no entry currently loaded"
    def __len__(self):
        # Number of entries exposed by this view (after start/maxEvents).
        return self.nEvents
    def __repr__(self):
        return '{}({})'.format(
            self.__class__.__name__,
            self._repr_contents()
        )
    def _repr_contents(self):
        # Shared by __repr__ (and subclasses) to render the state fields.
        return 'tree = {!r}, maxEvents={!r}, start={!r}, nEvents={!r}, iEvent={!r}'.format(
            self.tree,
            self.maxEvents,
            self.start,
            self.nEvents,
            self.iEvent
        )
    def __getitem__(self, i):
        # Load entry i (relative to self.start) into the tree's branches
        # and return self; resets iEvent before raising on out-of-range.
        if i >= self.nEvents:
            self.iEvent = -1
            raise IndexError("the index is out of range: " + str(i))
        self.iEvent = i
        self.tree.GetEntry(self.start + self.iEvent)
        return self
    def __iter__(self):
        # Yields *this same object* once per entry, after loading the
        # entry into the tree; iEvent tracks the current position.
        for self.iEvent in range(self.nEvents):
            self.tree.GetEntry(self.start + self.iEvent)
            yield self
        self.iEvent = -1
    def __getattr__(self, name):
        # Fall back to the wrapped TTree for branch access (event.jet_pt).
        return getattr(self.tree, name)
##__________________________________________________________________||
|
[
"tai.sakuma@gmail.com"
] |
tai.sakuma@gmail.com
|
584017b8c603df166692fd584d0144e09d4a261b
|
42a0befb594a6c081f3e788016c53889cfa2a9fb
|
/Codechef/factorial_easy_problem.py
|
e88719072ec1628291baff54eafc1b59d0b7f57f
|
[] |
no_license
|
Laksh8/competitive-programming
|
f436e8c525220ad95ef1c7a9d3aa98b4689d4f92
|
14c20e5cc32263c89a73524ab596efbbba2cc85a
|
refs/heads/master
| 2022-12-24T23:54:16.313515
| 2020-09-08T06:59:11
| 2020-09-08T06:59:11
| 293,727,288
| 2
| 1
| null | 2020-10-04T14:15:38
| 2020-09-08T06:57:35
|
Python
|
UTF-8
|
Python
| false
| false
| 207
|
py
|
def count_trailing_zeros(num):
    """Return the number of trailing zeros in num! .

    Counts the factors of 5 in num! via Legendre's formula
    (num//5 + num//25 + num//125 + ...); factors of 2 are always at
    least as plentiful, so only 5s matter.
    """
    total = 0
    while num >= 5:
        num = num // 5
        total = total + num
    return total


if __name__ == '__main__':
    # Original CodeChef driver: first line is the number of test cases,
    # then one integer per line.  Guarded so the module is importable.
    # (The original also shadowed the builtin `sum` and kept an unused
    # `divisor` variable; both removed.)
    testcase = int(input())
    while testcase > 0:
        num = int(input())
        print(count_trailing_zeros(num))
        testcase = testcase - 1
|
[
"lakshitkhanna311@gmail.com"
] |
lakshitkhanna311@gmail.com
|
cf550ea8b5a7f3638b9bec4ef4e8ec1e243f0ce3
|
3740de0d6e43ea140fc09ab314e4c492603ba185
|
/functions_legacy/FitVAR1.py
|
348409cfa3620731799498087218091ba4892c20
|
[
"MIT"
] |
permissive
|
s0ap/arpmRes
|
29c60c65fd3e11be1cc31d46494e5b3ebf6e05ab
|
ddcc4de713b46e3e9dcb77cc08c502ce4df54f76
|
refs/heads/master
| 2022-02-16T05:01:22.118959
| 2019-08-20T16:45:02
| 2019-08-20T16:45:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,390
|
py
|
import matplotlib.pyplot as plt
from numpy import ones, diff, eye
from RobustLassoFPReg import RobustLassoFPReg
def FitVAR1(X, p=None, nu=10**9, lambda_beta=0, lambda_phi=0, flag_rescale=0):
    """Estimate one-step VAR(1) parameters via robust lasso regression on
    first differences: dX_t = alpha + beta*X_{t-1} + U_t, so that
    X_t = alpha + (I + beta)*X_{t-1} + U_t.

    Parameters
    ----------
    X : array, shape (n_, t_end) - historical series of the variables
    p : array, shape (1, t_end-1), optional - flexible probabilities over
        the differenced sample (defaults to uniform; note the original
        header said (1 x t_end), but the code uses t_end-1 weights)
    nu : scalar - degrees of freedom of the multivariate Student t
        (the huge default makes the fit effectively Gaussian)
    lambda_beta : scalar - lasso penalty on the loadings
    lambda_phi : scalar - (g)lasso penalty on the covariance matrix
    flag_rescale : bool - if 0 (default), series are not rescaled before
        estimation

    Returns
    -------
    output1 : array, shape (n_, 1) - alpha (shift term)
    output2 : array, shape (n_, n_) - I + beta (one-step transition matrix)
    output3 : array, shape (n_, n_) - sig2_U (residual dispersion)
    """
    dX = diff(X,1,1)  # first differences along the time axis
    n_, t_ = dX.shape
    if p is None:
        # uniform flexible probabilities over the differenced sample
        p = ones((1,t_))/t_
    # robust lasso + glasso regression of the differences on lagged levels
    alpha, beta, sig2_U = RobustLassoFPReg(dX, X[:,:-1], p, nu, 10**-6, lambda_beta, lambda_phi, flag_rescale)
    output1 = alpha
    output2 = (eye(n_)+beta)  # transition matrix of the VAR(1) in levels
    output3 = sig2_U
    return output1, output2, output3
|
[
"dario.popadic@yahoo.com"
] |
dario.popadic@yahoo.com
|
a86f4e04fd293b02902b13f84e13a6a1da39451e
|
8b2af3cff75ba2a6f8557cdea0d852b9076ff6a3
|
/day014/venv/Scripts/easy_install-script.py
|
765b7c6bbfc60d30e4d3962d2ae52fb465a43cb6
|
[] |
no_license
|
AlexYangLong/Foundations-of-Python
|
98e5eaf7e7348120049f1ff4bb3d31393ad05592
|
bcf3a1fe526140fd2b05283c104488698ebc99fd
|
refs/heads/master
| 2020-03-16T21:45:34.232670
| 2018-05-11T10:19:21
| 2018-05-11T10:19:21
| 133,013,526
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 432
|
py
|
#!"D:\for python\0413\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==28.8.0','console_scripts','easy_install'
# Auto-generated setuptools wrapper: resolves and invokes the
# `easy_install` console-script entry point from setuptools 28.8.0.
__requires__ = 'setuptools==28.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the '-script.py(w)'/'.exe' suffix so argv[0] matches the
    # command name the user typed.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==28.8.0', 'console_scripts', 'easy_install')()
    )
|
[
"alex@alex.com"
] |
alex@alex.com
|
8c5bc5f4bd79e3341f8f0e73ae8eb742781ec259
|
4d05be863b63a56a90b4c46b15069827b33ecaae
|
/Algorithms/leetcode_charlie/001_two_sum.py
|
620d566dbd384fec815ccab50c6a4b01c5519abe
|
[] |
no_license
|
leeo1116/PyCharm
|
e532fa9754056019508cc454214ee1a8ad9b26a9
|
b6942c05c27556e5fe47879e8b823845c84c5430
|
refs/heads/master
| 2022-11-06T00:43:14.882453
| 2017-07-13T04:50:00
| 2017-07-13T04:50:00
| 36,851,636
| 0
| 1
| null | 2022-10-20T10:44:39
| 2015-06-04T06:09:09
|
Python
|
UTF-8
|
Python
| false
| false
| 946
|
py
|
__doc__ = """
Given an array of integers, find two numbers such that they add up to a specific target number.The function twoSum
should return indices of the two numbers such that they add up to the target, where index1 must be less than index2.
Please note that your returned answers (both index1 and index2) are not zero-based.You may assume that each input would
have exactly one solution.
Input: numbers={2, 7, 11, 15}, target=9
Output: index1=1, index2=2
"""
class Solution(object):
    """Two-sum solver returning 1-based indices; prints a banner naming
    which solution attempt is running."""

    def __init__(self, index):
        # Ordinal used only for the banner printed by two_sum().
        self.index = index

    def two_sum(self, nums, target):
        """Return (i1, i2), 1-based with i1 < i2, such that
        nums[i1-1] + nums[i2-1] == target (exactly one solution assumed).

        Single pass: remember each value's position and look up the
        complement of the current value among those already seen.
        """
        print('#{0} Solution:\n'.format(self.index))
        seen = {}
        for position, value in enumerate(nums):
            complement = target - value
            if seen.get(complement) is not None:
                return seen[complement] + 1, position + 1
            seen[value] = position


s = Solution(1)
solution = s.two_sum([0, 4, 3, 0], 0)
print(solution)
|
[
"leeo1116@gmail.com"
] |
leeo1116@gmail.com
|
55d2099c22b2ef1df7eed3cdac7b86d9e3c15d97
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_230/ch33_2020_03_30_20_03_55_797460.py
|
27b68ff699defdc00de4fdca5d880421d1e22da1
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 321
|
py
|
def eh_primo(n):
    """Return True if n is a prime number, else False.

    Fixes over the original:
    - negative odd numbers (e.g. -3) and other n < 2 no longer report
      True: anything below 2 is rejected up front;
    - trial division stops at sqrt(n) instead of scanning every odd
      number below n (O(sqrt n) instead of O(n)), same results.
    """
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2  # 2 is the only even prime
    div = 3
    while div * div <= n:
        if n % div == 0:
            return False
        div += 2
    return True
def primos_entre(a, b):
    """Count how many primes lie in the inclusive range [a, b].

    Bug fix: the original loop incremented an undefined name `x`
    (raising NameError on the first iteration and never advancing the
    loop variable); it must advance `a`.
    """
    n_primos = 0
    while a <= b:
        if eh_primo(a):
            n_primos += 1
        a += 1
    return n_primos
|
[
"you@example.com"
] |
you@example.com
|
d8c80ee0a2954ef4a10f0ebfbf034248dcc2d365
|
a8fb5d37de019221e5897a98bd176c566037f813
|
/Playground/objgraph_/obj_graph.py
|
0890b1611b57af3cdb6b08c6f9339df38174a04e
|
[] |
no_license
|
tdworowy/PythonPlayground
|
b743dc2b870d1681b24e654e2af3fe5957710265
|
ff090fb44046c9c37501f5dbbcb08d56552540d4
|
refs/heads/master
| 2023-01-05T00:28:55.725894
| 2022-12-27T10:06:40
| 2022-12-27T10:06:40
| 72,983,029
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,219
|
py
|
import objgraph
class Staff:
    """Minimal single-value wrapper used to build reference chains for
    the objgraph demos below."""
    def __init__(self, ele):
        self.ele = ele
    def get(self):
        # Return the wrapped value.
        return self.ele
def example(count):
    """Return Staff([Staff(0), Staff(1), Staff(2)]) after `count` levels
    of recursion (the recursion only exists to deepen the call chain).

    Fixes over the original:
    - the wrapped list was rebuilt and discarded at every recursion
      level; only the base case needs it;
    - a negative `count` recursed without bound (RecursionError); it is
      now treated like the base case.
    """
    if count > 0:
        return example(count - 1)
    x = range(3)
    y = [Staff(i) for i in x]
    return Staff(y)
def example2():
    """Return the int 1 wrapped in ten nested Staff objects."""
    node = 1
    for _ in range(10):
        node = Staff(node)
    return node
def example3():
    """Build a deliberately tangled object graph (repeated Staff chains,
    tuples, and mutually-referencing lists) for the deeper show_refs
    demos."""
    l = []
    l1 = []
    for x in range(7):
        z = example(5)
        q = example2()
        l.append(z)
        l.append(q)
        l.append((z, q))
    l1.append(l)
    l.append(l1)  # creates the cycle l -> l1 -> l
    return Staff(l)
def test1():
    # Reference graph of a small recursive structure.
    objgraph.show_refs(example(3), filename="obj.png", refcounts=True)
def test2():
    # NOTE(review): map() is lazy in Python 3, so this draws the map
    # object itself rather than 100 result graphs — looks like Python 2
    # legacy; confirm the intended behavior.
    x = range(100)
    y = map(example, x)
    objgraph.show_refs(y, filename="obj2.png", refcounts=True)
def test3():
    # Nested-Staff chain, limited depth and fan-out.
    objgraph.show_refs(example2(), filename="obj3.png", refcounts=True, max_depth=5, too_many=10)
def test4():
    """Take lot of time"""
    objgraph.show_refs(example3(), filename="obj4.png", refcounts=True, max_depth=10, too_many=100)
def test5():
    # Same graph as test4 but with a tighter fan-out cap (faster).
    objgraph.show_refs(example3(), filename="obj5.png", refcounts=True, max_depth=10, too_many=20)
if __name__ == "__main__":
    test5()
|
[
"dworowytomasz@gmail.com"
] |
dworowytomasz@gmail.com
|
17b37f4a03a4049d3afd2397497d08fa832d5305
|
dcc62f725e8d1fdebc3be5192960584198d19813
|
/meiduo_mall/meiduo_mall/utils/category.py
|
535a51cb88960d742c97a2c71d02a628b6f21fb7
|
[] |
no_license
|
1923488289/youprojects
|
e51cbb7958963fb8a3a82405f5df18e9a066b1ee
|
ebd1856dab02e45db69d2d5307473f0f22855988
|
refs/heads/master
| 2022-12-11T12:40:55.832289
| 2019-09-24T15:31:34
| 2019-09-24T15:31:34
| 210,625,080
| 0
| 0
| null | 2022-12-08T01:49:05
| 2019-09-24T14:36:24
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,745
|
py
|
from goods.models import GoodsChannel
def get_category():
    """Build the category navigation data grouped by goods channel.

    Returns a dict keyed by channel group id; each value holds
    'channels' (first-level categories with their channel URLs) and
    'sub_cats' (second-level categories, each carrying its third-level
    children) — see the illustrative structure at the end of the body.
    """
    # 1. Query channels ordered by group, then by in-group sequence.
    channels = GoodsChannel.objects.order_by('group_id', 'sequence')
    categories = {}
    # 2. Walk the channels, collecting first- and second-level categories.
    for channel in channels:
        # 3. Create the bucket the first time this group id is seen.
        if channel.group_id not in categories:
            categories[channel.group_id] = {
                'channels': [],  # first-level categories
                'sub_cats': []  # second-level categories
            }
        # 3.1 Grab this group's bucket.
        channel_dict = categories[channel.group_id]
        # 4. Append this channel's first-level category.
        channel_dict['channels'].append({
            'name': channel.category.name,  # first-level category name
            'url': channel.url  # channel link
        })
        # 5. Append the second-level categories under it.
        catetory2s = channel.category.subs.all()
        # 6. Each second-level entry carries its third-level children.
        for catetory2 in catetory2s:
            channel_dict['sub_cats'].append({
                'name': catetory2.name,  # second-level category name
                'sub_cats': catetory2.subs.all()  # third-level categories
            })
    '''
    {
        1:{
            'channels':[手机,相机,数码],
            'sub_cats':[
                {
                    'name':'手机通讯',
                    'sub_cats':[手机,游戏手机,..]
                },
                {
                    。。。。
                }
            ]
        },
        2:{
            'channels':[电脑,办公],
            'sub_cats':[]
        }
    }
    '''
    return categories
|
[
"1923488289@qq.com"
] |
1923488289@qq.com
|
f8940de643087082e5912d2288535fcea3c528d7
|
255e19ddc1bcde0d3d4fe70e01cec9bb724979c9
|
/all-gists/1073585/snippet.py
|
3b5fdb9cca782aeebdcb2fd67a5527bed28bd730
|
[
"MIT"
] |
permissive
|
gistable/gistable
|
26c1e909928ec463026811f69b61619b62f14721
|
665d39a2bd82543d5196555f0801ef8fd4a3ee48
|
refs/heads/master
| 2023-02-17T21:33:55.558398
| 2023-02-11T18:20:10
| 2023-02-11T18:20:10
| 119,861,038
| 76
| 19
| null | 2020-07-26T03:14:55
| 2018-02-01T16:19:24
|
Python
|
UTF-8
|
Python
| false
| false
| 306
|
py
|
def proc_starttime(pid):
    """Return the start time of process *pid* in seconds since the epoch.

    Reads the system boot time ("btime" line of /proc/stat) and adds the
    process start time (22nd field of /proc/<pid>/stat, measured in
    clock ticks since boot) — Linux only.

    Fixes over the original gist: `re` and `os` were used without being
    imported, and both /proc files were opened without being closed.
    """
    import os
    import re

    with open("/proc/stat") as stat_file:
        m = re.search(r"^btime (\d+)$", stat_file.read(), re.MULTILINE)
    btime = int(m.group(1))
    clk_tck = os.sysconf(os.sysconf_names["SC_CLK_TCK"])
    with open("/proc/%d/stat" % pid) as proc_file:
        # field 22 (index 21) is starttime, in clock ticks since boot
        stime = int(proc_file.read().split()[21]) / clk_tck
    return btime + stime
|
[
"gistshub@gmail.com"
] |
gistshub@gmail.com
|
3c84cf1aa382ae73435312ccf759eef54d752f84
|
845f627d3b28f88e7a5367ba8bf3b669cf5a6eae
|
/script/report/report.py
|
36a843d34ee338301436cb85b89184b33530581b
|
[] |
no_license
|
quanrd/nf-reseq-om
|
42e5066c99326c30e6aa650acbdc0ab2d4e52683
|
1ed90fff58fba5095f3454be07b803e82ced98b6
|
refs/heads/master
| 2022-11-18T22:03:49.556357
| 2020-01-06T06:40:13
| 2020-01-06T06:40:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,136
|
py
|
import os
import glob
import sys
import jinja2
import fire
import pandas as pd
from io import StringIO
from pathlib import Path, PurePath
pd.set_option('precision', 3)  # three decimal places in rendered tables
# Locate the bundled jinja2 HTML template next to this script.
script_dir, _ = os.path.split(os.path.abspath(__file__))
env = jinja2.Environment(loader=jinja2.FileSystemLoader(
    searchpath='{}/template'.format(script_dir)
)
)
template = env.get_template('index.html')
# code that fills in display_dictionary with the values to send to the template
def format_map_df(map_df):
    """Pretty-format the mapping-summary table for the HTML report.

    Transposes so samples become rows, strips trailing ':' from the
    metric names, thousands-separates the count columns, and keeps the
    last three columns untouched as floats.

    NOTE(review): assumes a fixed column layout after transposing —
    columns 0, 1, 4, 3 are integer counts and the final three are
    rate-like floats; confirm against the producer of
    alignment/mapping.summary.csv.
    """
    t_map_df = map_df.T
    t_map_df.columns = [each.rstrip(':') for each in t_map_df.columns]
    int_df = t_map_df.iloc[:, [0, 1, 4, 3]]
    float_df = t_map_df.iloc[:, -3:]
    int_df = int_df.astype('int')
    int_df = int_df.applymap(lambda x: f'{x:,}')  # e.g. 1234567 -> '1,234,567'
    clean_df = pd.concat([int_df, float_df], axis=1)
    clean_df.index.name = 'Item'
    return clean_df
def format_reads_df(reads_df):
    """Pretty-format the read-QC summary table for the HTML report.

    Thousands-separates the first four (count) columns and renders the
    last five columns with three decimal places.

    NOTE(review): assumes qc/data.summary.csv always has counts in
    columns 0-3 and rates in the final five columns — confirm.
    """
    int_df = reads_df.iloc[:, 0:4]
    float_df = reads_df.iloc[:, -5:]
    float_df = float_df.applymap(lambda x: f'{x:.3f}')
    int_df = int_df.astype('int').applymap(lambda x: f'{x:,}')
    clean_df = pd.concat([int_df, float_df], axis=1)
    clean_df.index.name = 'Item'
    return clean_df
def table2dict(table_file, name, sep='\t', format_func=None):
    """Load a delimited table into template-friendly header/body lists.

    Returns a dict with '<name>_header' (column names, index column
    first) and '<name>_body' (one list per row).  Both keys are absent
    when `table_file` does not exist, and the body key is absent when
    the table has no rows.  A truthy 'snp' flag is added whenever `name`
    mentions snp, so the template can toggle the SNP section.
    """
    rendered = dict()
    if table_file.is_file():
        frame = pd.read_csv(table_file, sep=sep, index_col=0)
        if format_func is not None:
            frame = format_func(frame)
        frame.sort_index(inplace=True)
        frame = frame.reset_index()
        body_key = f'{name}_body'
        for row_label in frame.index:
            rendered.setdefault(body_key, []).append(list(frame.loc[row_label]))
        rendered[f'{name}_header'] = list(frame.columns)
        if 'snp' in name:
            rendered['snp'] = True
    return rendered
def plot2report(plot_path, outpath, plot_flag, plot_name=None):
    """Copy the first file matching the glob pattern `plot_path` into
    `outpath` and flag its presence.

    The copy keeps the original suffix; its stem is `plot_name` when
    given, otherwise the source file's stem.  Returns {plot_flag: True}
    when a match was found and copied, otherwise an empty dict.
    """
    matches = glob.glob(str(plot_path))
    if not matches:
        return {}
    found = PurePath(matches[0])
    target_dir = PurePath(outpath)
    stem = plot_name if plot_name is not None else found.stem
    destination = target_dir / f'{stem}{found.suffix}'
    os.system(f'cp {found} {destination}')
    return {plot_flag: True}
def plotlist2report(plot_list, outpath, plot_flag):
    """Copy every file in `plot_list` into `outpath` and return
    {plot_flag: [basenames...]}; an empty `plot_list` yields an empty
    dict (no flag key at all)."""
    if not plot_list:
        return {}
    for source in plot_list:
        os.system(f'cp {source} {outpath}')
    return {plot_flag: [PurePath(p).name for p in plot_list]}
def exom_report(result_dir, proj_name, report_dir=None):
    """Assemble the HTML exome-pipeline report and tar it up.

    Gathers QC / alignment / SNP summary tables and plot images from
    `result_dir`, renders them through the module-level jinja2
    `template`, writes index.html plus assets under `report_dir`
    (default: <result_dir>/report, wiped first if it exists), and
    finally creates <report_dir>.tar.gz.  Missing tables/plots are
    silently skipped (table2dict/plot2report return empty dicts).
    """
    result_dir = Path(result_dir)
    if report_dir is None:
        report_dir = result_dir / 'report'
    else:
        report_dir = Path(report_dir)
    if report_dir.is_dir():
        os.system(f'rm -r {report_dir}')  # start from a clean report dir
    display_dictionary = {}
    display_dictionary['project_name'] = proj_name
    # add fastqc table
    qc_table = result_dir / 'qc/data.summary.csv'
    display_dictionary.update(
        table2dict(qc_table, 'seq', sep=',', format_func=format_reads_df))
    # add aligment table
    align_table = result_dir / 'alignment/mapping.summary.csv'
    display_dictionary.update(
        table2dict(
            align_table, 'align', sep=',', format_func=format_map_df))
    # snp stats
    # summary
    snp_summary_table = result_dir / 'snp/overall.varSummary.txt'
    display_dictionary.update(
        table2dict(snp_summary_table, 'snp_summary'))
    snp_number_table = result_dir / 'snp/overall.varNum.txt'
    display_dictionary.update(
        table2dict(snp_number_table, 'snp_number'))
    snp_impact_table = result_dir / 'snp/overall.varImpact.txt'
    display_dictionary.update(
        table2dict(snp_impact_table, 'snp_impact'))
    snp_effect_table = result_dir / 'snp/overall.varEffects.txt'
    display_dictionary.update(
        table2dict(snp_effect_table, 'snp_effect'))
    snp_region_table = result_dir / 'snp/overall.varRegion.txt'
    display_dictionary.update(
        table2dict(snp_region_table, 'snp_region'))
    # Copy the static template assets (css/js/imgs) into the report dir.
    report_dir.mkdir(parents=True, exist_ok=True)
    os.system('cp -r {script_dir}/template/* {report_dir}'.format(
        script_dir=script_dir,
        report_dir=report_dir
    ))
    # plots
    report_plot_path = report_dir / 'imgs'
    mapping_plot = result_dir / 'plot/alignment/Mapping_stats.png'
    display_dictionary.update(
        plot2report(mapping_plot, report_plot_path, 'mapping_plot'))
    # genome_cov_plot = result_dir / 'plot/alignment/Reads_coverage_genome.png'
    # display_dictionary.update(
    #     plot2report(genome_cov_plot, report_plot_path, 'genome_cov_plot')
    # )
    exon_cov_plot = result_dir / 'plot/alignment/Reads_coverage_exon.png'
    display_dictionary.update(
        plot2report(exon_cov_plot, report_plot_path, 'exon_cov_plot')
    )
    variant_summary_plot = result_dir / \
        'plot/variants/Variant_stats_summary.png'
    if variant_summary_plot.exists():
        display_dictionary.update(
            plot2report(variant_summary_plot,
                        report_plot_path, 'variant_summary')
        )
    # Remember one per-sample plot directory name (used by the
    # commented-out per-sample varEffects blocks below).
    variant_summary_plot_dir = result_dir / 'plot/variants/'
    for dir_i in variant_summary_plot_dir.iterdir():
        if dir_i.is_dir():
            example_sample = dir_i.name
    varType_plot = glob.glob(f'{result_dir}/plot/variants/*/*_varType.png')
    display_dictionary.update(
        plotlist2report(varType_plot, report_plot_path,
                        'varType_plots'))
    varRegion_plot = glob.glob(
        f'{result_dir}/plot/variants/*/*_varRegion.png')
    display_dictionary.update(
        plotlist2report(varRegion_plot, report_plot_path,
                        'varRegion_plots'))
    varImpact_plot = glob.glob(
        f'{result_dir}/plot/variants/*/*_varImpact.png')
    display_dictionary.update(
        plotlist2report(varImpact_plot,
                        report_plot_path, 'varImpact_plots'))
    # varEffects_high_plot = result_dir / \
    #     f'plot/variants/{example_sample}/{example_sample}_varEffects-HIGH.png'
    # display_dictionary.update(
    #     plot2report(varEffects_high_plot, report_plot_path,
    #                 'variant_effect_high', 'varEffects-HIGH'))
    # varEffects_moderate_plot = result_dir / \
    #     f'plot/variants/{example_sample}/{example_sample}_varEffects-MODERATE.png'
    # display_dictionary.update(
    #     plot2report(varEffects_moderate_plot,
    #                 report_plot_path,
    #                 'variant_effect_moderate', 'varEffects-MODERATE'))
    # varEffects_low_plot = result_dir / \
    #     f'plot/variants/{example_sample}/{example_sample}_varEffects-LOW.png'
    # display_dictionary.update(
    #     plot2report(varEffects_low_plot, report_plot_path,
    #                 'variant_effect_low', 'varEffects-LOW'))
    # varEffects_modifier_plot = result_dir / \
    #     f'plot/variants/{example_sample}/{example_sample}_varEffects-MODIFIER.png'
    # display_dictionary.update(
    #     plot2report(varEffects_modifier_plot,
    #                 report_plot_path,
    #                 'variant_effect_modifier', 'varEffects-MODIFIER'))
    # deltaSNP_plot = result_dir / 'mapping/*deltaSNP.png'
    # Gprime_plot = result_dir / 'mapping/*Gprime.png'
    # negLog10Pval_plot = result_dir / 'mapping/*negLog10Pval.png'
    # plot2report(deltaSNP_plot, report_plot_path, 'deltaSNP')
    # plot2report(Gprime_plot, report_plot_path, 'Gprime')
    # plot2report(negLog10Pval_plot, report_plot_path, 'negLog10Pval')
    # display_dictionary.update({'pca': True, 'snp_index': True})
    # Render and write the final page, then archive the whole report.
    display_html = template.render(display_dictionary)
    report_html = report_dir / 'index.html'
    with open(report_html, 'w') as out_inf:
        out_inf.write(display_html)
    os.system(f'tar -zcf {report_dir}.tar.gz -C {report_dir.parent} {report_dir.name}')
if __name__ == '__main__':
    # CLI entry point: fire maps command-line args onto exom_report().
    fire.Fire(exom_report)
|
[
"ricekent@163.com"
] |
ricekent@163.com
|
de693df1430585e4e82e8e60b7a7241ff863692c
|
20c979fc8a88dc893692c3d83c9907c928c78074
|
/prog9.py
|
0b026f3b1e73d7694c124def464e67c57bac49f8
|
[] |
no_license
|
ParulProgrammingHub/assignment-1-kheniparth1998
|
57edba326325af3b6dfbc6aea59e701ff5634d6c
|
8c277dfb8c4a4cdf25ad7f1851d1247a6a3dc86d
|
refs/heads/master
| 2021-01-19T09:14:42.309237
| 2017-02-15T17:24:07
| 2017-02-15T17:24:07
| 82,086,413
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 508
|
py
|
# Converted from Python 2 to Python 3: input() no longer evals its
# result, so marks are converted with float() explicitly, and print is
# a function.  Computation and output are otherwise unchanged.
a = float(input("enter maximum marks for a subject: "))
total_marks = a * 5.0
sub1 = float(input("enter marks of subject 1: "))
sub2 = float(input("enter marks of subject 2: "))
sub3 = float(input("enter marks of subject 3: "))
sub4 = float(input("enter marks of subject 4: "))
sub5 = float(input("enter marks of subject 5: "))
obtain_marks = sub1 + sub2 + sub3 + sub4 + sub5
avg_marks = obtain_marks / 5.0
percent = (obtain_marks * 100) / total_marks
print("average is :", avg_marks)
print("percentage is :", percent)
if percent < 35:
    print("FAIL")
else:
    print("PASS")
|
[
"noreply@github.com"
] |
ParulProgrammingHub.noreply@github.com
|
7c3a5292dbdf6072cb25a109cfdd13c7983d7548
|
6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386
|
/google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/common/types/click_location.py
|
89822bb622bd68c49742d2af6245686e64e28b2b
|
[
"Apache-2.0"
] |
permissive
|
oltoco/googleapis-gen
|
bf40cfad61b4217aca07068bd4922a86e3bbd2d5
|
00ca50bdde80906d6f62314ef4f7630b8cdb6e15
|
refs/heads/master
| 2023-07-17T22:11:47.848185
| 2021-08-29T20:39:47
| 2021-08-29T20:39:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,988
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
# Generated proto-plus module descriptor: binds this module's message
# classes to the Google Ads v7 proto package and declares the manifest.
__protobuf__ = proto.module(
    package='google.ads.googleads.v7.common',
    marshal='google.ads.googleads.v7',
    manifest={
        'ClickLocation',
    },
)
class ClickLocation(proto.Message):
    r"""Location criteria associated with a click.
    Attributes:
        city (str):
            The city location criterion associated with
            the impression.
        country (str):
            The country location criterion associated
            with the impression.
        metro (str):
            The metro location criterion associated with
            the impression.
        most_specific (str):
            The most specific location criterion
            associated with the impression.
        region (str):
            The region location criterion associated with
            the impression.
    """
    # NOTE: generated code — the field numbers below must stay in sync
    # with the .proto definition; all fields are proto3 optional strings.
    city = proto.Field(
        proto.STRING,
        number=6,
        optional=True,
    )
    country = proto.Field(
        proto.STRING,
        number=7,
        optional=True,
    )
    metro = proto.Field(
        proto.STRING,
        number=8,
        optional=True,
    )
    most_specific = proto.Field(
        proto.STRING,
        number=9,
        optional=True,
    )
    region = proto.Field(
        proto.STRING,
        number=10,
        optional=True,
    )
|
[
"bazel-bot-development[bot]@users.noreply.github.com"
] |
bazel-bot-development[bot]@users.noreply.github.com
|
6c32637f146447fc95c3941386d8534c7c68f874
|
73d9e70adfbc6043ecdb8de2ea1b2339007ea5e9
|
/tests/features/stdin_input_steps.py
|
3256a36e7707594433db8cc6b255a844a6491819
|
[
"Apache-2.0"
] |
permissive
|
cheesinglee/bigmler
|
e147df8d98bcc0624b325fccf381577e74e62b1e
|
cda58f6149e211897c931300083c6b1b3686ff11
|
refs/heads/master
| 2020-04-06T07:01:11.195760
| 2015-02-12T23:14:31
| 2015-02-12T23:14:31
| 20,578,762
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,242
|
py
|
import os
import time
import csv
import json
from lettuce import step, world
from subprocess import check_call, CalledProcessError
from bigml.api import check_resource
from bigmler.checkpoint import file_number_of_lines
from common_steps import check_debug
from basic_test_prediction_steps import shell_execute
@step(r'I create BigML resources uploading train "(.*)" file to test "(.*)" read from stdin and log predictions in "(.*)"$')
def i_create_all_resources_to_test_from_stdin(step, data=None, test=None, output=None):
    """Lettuce step: train bigmler on `data` and predict for `test`
    piped in on stdin (note the bare --test flag), storing predictions
    under `output`."""
    if data is None or test is None or output is None:
        assert False
    command = ("cat " + test + "|bigmler --train " + data +
               " --test --store --output " + output + " --max-batch-models 1")
    shell_execute(command, output, test=test)
@step(r'I create a BigML source from stdin using train "(.*)" file and logging in "(.*)"$')
def i_create_source_from_stdin(step, data=None, output_dir=None):
    """Lettuce step: create only a BigML source (no dataset/model) from
    training data piped in on stdin, logging under `output_dir`."""
    if data is None or output_dir is None:
        assert False
    command = ("cat " + data + "|bigmler --train " +
               "--store --no-dataset --no-model --output-dir " +
               output_dir + " --max-batch-models 1")
    shell_execute(command, output_dir + "/test", test=None)
|
[
"merce@bigml.com"
] |
merce@bigml.com
|
28d39c6dea81506b8dd1e1f53230c70c25166d80
|
6536e42c9a336c80d370d7f07cc4260e4055f683
|
/wsgitest.py
|
f50c5fd0888492f0b48b4b2933233864a3b2cb8a
|
[
"BSD-2-Clause"
] |
permissive
|
jonashaag/WSGITest
|
4ca01144b6217b4769020c0597d075dd03d4549a
|
fb6f6981f8cc8192b2207a803c078a03bab31a84
|
refs/heads/master
| 2020-05-20T09:41:52.598794
| 2011-01-17T18:09:21
| 2011-01-17T18:09:21
| 818,200
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 536
|
py
|
import optparse
# Command-line interface: -d forces the bundled default test suite to be
# included even when explicit test files are given (default None lets
# the main block decide based on whether files were passed).
parser = optparse.OptionParser()
parser.add_option('-d', dest='default_tests',
                  action='store_true', default=None)
if __name__ == '__main__':
    from wsgitest.run import run_tests
    options, files = parser.parse_args()
    # No explicit files: fall back to the bundled default test suite
    # unless -d was given explicitly.
    if not files:
        if options.default_tests is None:
            options.default_tests = True
    if options.default_tests:
        from wsgitest import DEFAULT_TESTS_DIR
        files.append(DEFAULT_TESTS_DIR)
    result = run_tests(files)
    # Fix: `print result.summary()` was Python 2 print-statement syntax
    # and is a SyntaxError under Python 3; use the call form.
    print(result.summary())
|
[
"jonas@lophus.org"
] |
jonas@lophus.org
|
e2ff3e605f5d643adb4a22ce53b2aa918cf781f4
|
e5c9fc4dc73536e75cf4ab119bbc642c28d44591
|
/src/leetcodepython/array/day_week_1185.py
|
d66cac9a636edc8be0acebb4fb26b98b46b0000b
|
[
"MIT"
] |
permissive
|
zhangyu345293721/leetcode
|
0a22034ac313e3c09e8defd2d351257ec9f285d0
|
50f35eef6a0ad63173efed10df3c835b1dceaa3f
|
refs/heads/master
| 2023-09-01T06:03:18.231266
| 2023-08-31T15:23:03
| 2023-08-31T15:23:03
| 163,050,773
| 101
| 29
| null | 2020-12-09T06:26:35
| 2018-12-25T05:58:16
|
Java
|
UTF-8
|
Python
| false
| false
| 1,803
|
py
|
# -*- coding:utf-8 -*-
'''
/**
* This is the solution of No. 1185 problem in the LeetCode,
* the website of the problem is as follow:
* https://leetcode-cn.com/problems/day-of-the-week
*
* The description of problem is as follow:
* ==========================================================================================================
* 给你一个日期,请你设计一个算法来判断它是对应一周中的哪一天。
*
* 输入为三个整数:day、month 和 year,分别表示日、月、年。
*
* 您返回的结果必须是这几个值中的一个 {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}。
* 示例 1:
*
* 输入:day = 31, month = 8, year = 2019
* 输出:"Saturday"
* 示例 2:
*
* 输入:day = 18, month = 7, year = 1999
* 输出:"Sunday"
* 示例 3:
*
* 输入:day = 15, month = 8, year = 1993
* 输出:"Sunday"
*
* 来源:力扣(LeetCode)
* ==========================================================================================================
*
* @author zhangyu (zhangyuyu417@gmail.com)
*/
'''
import datetime
class Solution:
    def day_of_the_week(self, day: int, month: int, year: int) -> str:
        """Return the English weekday name for the given calendar date.

        Args:
            day: day of the month
            month: month number (1-12)
            year: four-digit year
        Returns:
            One of "Monday" .. "Sunday".
        """
        names = ("Monday", "Tuesday", "Wednesday", "Thursday", "Friday",
                 "Saturday", "Sunday")
        # date.weekday(): Monday == 0 ... Sunday == 6
        return names[datetime.date(year, month, day).weekday()]
if __name__ == '__main__':
    # Smoke test: 2020-02-08 was a Saturday.
    y, m, d = 2020, 2, 8
    solution = Solution()
    day_of_week = solution.day_of_the_week(d, m, y)
    print(day_of_week)
    assert day_of_week=='Saturday'
|
[
"zhangyu_xtb@geekplus.cc"
] |
zhangyu_xtb@geekplus.cc
|
fdaa58c6c8d679b9e5214b8455713c37be838bfc
|
fa097257d8ec4167db24b17076a38e60dbbb0b36
|
/Code/27. Quadratic primes.py
|
fe1e7e6ad3f76dd53febf405107f55c1dfee5f04
|
[] |
no_license
|
SergeyShk/Project-Euler
|
5e0d5bb3f03e2baaa25bd895f53603026fb147c7
|
6f3019ca88a545bf85e714526aa6ca661f89e4a9
|
refs/heads/master
| 2021-08-16T03:04:28.000466
| 2020-04-15T20:13:29
| 2020-04-15T20:13:29
| 159,189,991
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,448
|
py
|
'''
Euler discovered the remarkable quadratic formula:
n^2+n+41
It turns out that the formula will produce 40 primes for the consecutive integer values 0≤n≤39. However, when n=40,40^2+40+41=40(40+1)+41 is divisible by 41, and certainly when n=41,41^2+41+41 is clearly divisible by 41.
The incredible formula n^2−79n+1601 was discovered, which produces 80 primes for the consecutive values 0≤n≤79. The product of the coefficients, −79 and 1601, is −126479.
Considering quadratics of the form:
n^2+an+b, where |a|<1000 and |b|≤1000
where |n| is the modulus/absolute value of n
e.g. |11|=11 and |−4|=4
Find the product of the coefficients, a and b, for the quadratic expression that produces the maximum number of primes for consecutive values of n, starting with n=0.
'''
def _is_prime(num):
    """Return True iff num is prime. Primes are >= 2; 0, 1 and negatives are not."""
    if num < 2:
        return False
    return all(num % i for i in range(2, int(num ** 0.5) + 1))


def problem_27(a, b):
    """Project Euler 27: quadratic primes.

    Considers every quadratic n^2 + p*n + q with -a <= p <= a and
    -b <= q <= b, counts how many primes it produces for consecutive
    n starting at n = 0, and returns the product p*q for the pair
    producing the longest run (the first such pair in scan order).

    Fix: the previous primality test treated 0, 1 and |negative numbers|
    without small divisors as prime (e.g. q = 1 gave long bogus chains
    through values 1 and -1), which could inflate run lengths.
    """
    best = {'a': 0, 'b': 0, 'n': 0}
    for p in range(-a, a + 1):
        for q in range(-b, b + 1):
            # Length of the consecutive-prime run starting at n = 0.
            n = 0
            while _is_prime(n * n + p * n + q):
                n += 1
            if n > best['n']:  # strictly greater: keep the first maximum
                best = {'a': p, 'b': q, 'n': n}
    return best['a'] * best['b']
|
[
"kouki.sergey@gmail.com"
] |
kouki.sergey@gmail.com
|
3f2d82a98c7780670df6738341d6c92a64e95c4f
|
8b71aaab38dbe1adac0c3dfa97bd39997272e0d1
|
/main.py
|
35138ba0d0bd7dd7909fbd7bb9db47ccdd44538f
|
[
"MIT"
] |
permissive
|
sreekesari-vangeepuram/visual-card-generator
|
39486d0d0565d8400c3d1e4f2b6f77ea8a1d2add
|
f39b253c21d98119e44ab741d992bde7987354c3
|
refs/heads/main
| 2023-07-16T17:03:04.148380
| 2021-09-07T15:41:33
| 2021-09-07T15:41:33
| 339,816,805
| 1
| 1
| null | 2021-08-11T08:28:56
| 2021-02-17T18:22:34
|
Python
|
UTF-8
|
Python
| false
| false
| 5,388
|
py
|
"""
Copyright © 2021
Vangeepuram Sreekesari
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from sys import argv
from PIL import Image, ImageDraw, ImageFilter, ImageFont
# If you are developing an API
# just change the parameters
# in your convenient way!
# NOTE(review): this file is a template — every `<int>`, `<username>`, etc.
# below is a placeholder that must be filled in before the script can run
# (as written, the placeholders are invalid Python syntax).
global vc_size, profile_pic_size, overlay_location, watermark_location, uname_fntsz, other_fntsz
# Card size and profile-picture size in pixels.
vc_size, profile_pic_size = (<int>, <int>), (<int>, <int>)
# Center the profile picture on the card.
overlay_location = (vc_size[0] // 2 - profile_pic_size[0] // 2,
                    vc_size[1] // 2 - profile_pic_size[1] // 2)
# Font sizes: username (large) and all other text (small).
uname_fntsz, other_fntsz = <int>, <int>
# CLI arguments: path to the profile picture and the card background color.
profile_pic_path = argv[1]
color = argv[2]
# --------------------------------------------------
username = "<username>"
user_handle = f"@{'<userhandle>'}"
user_location = "<user-location>"
# --------------------------------------------------
def crop_center(pil_img, crop_width, crop_height):
    """Crop a crop_width x crop_height box from the middle of pil_img.

    The box is shifted 5 px toward the bottom-right of the exact center.
    """
    w, h = pil_img.size
    shift = 5
    left = (w - crop_width) // 2 + shift
    upper = (h - crop_height) // 2 + shift
    right = (w + crop_width) // 2 + shift
    lower = (h + crop_height) // 2 + shift
    return pil_img.crop((left, upper, right, lower))


def crop_max_square(pil_img):
    """Crop the largest centered square (side = smaller dimension) from pil_img."""
    side = min(pil_img.size)
    return crop_center(pil_img, side, side)
def mask_circle_transparent(pil_img, blur_radius, offset=0):
    """Return a copy of pil_img with a soft circular alpha mask applied."""
    margin = offset + blur_radius * 2
    mask = Image.new("L", pil_img.size, 0)
    ImageDraw.Draw(mask).ellipse(
        (margin, margin, pil_img.size[0] - margin, pil_img.size[1] - margin),
        fill=255)
    # Blur the mask so the circular edge fades out instead of cutting hard.
    mask = mask.filter(ImageFilter.GaussianBlur(blur_radius))
    masked = pil_img.copy()  # work on a copy; putalpha mutates in place
    masked.putalpha(mask)
    return masked
def render_text(image, text, text_location, font_size):
    """Render *text* onto *image* at *text_location* and return the image.

    If the text contains "|", the part before the bar is drawn at
    *font_size* and the part after it is drawn beside it at the smaller
    module-level ``other_fntsz`` (used for the username | handle row).
    """
    card = ImageDraw.Draw(image)
    font_path = "./etc/font.ttf"
    if "|" not in text:
        card.text(text_location, text, font=ImageFont.truetype(font_path, font_size))
    else:
        # Draw the primary segment, then measure it to position the secondary one.
        card.text(text_location, text.split("|")[0], font=ImageFont.truetype(font_path, font_size))
        width, height = card.textsize(text.split("|")[0], font=ImageFont.truetype(font_path, font_size))
        n_spaces = width // len(text.split("|")[0]) + 2 # since word-size is diff. based on font-style
        # Secondary segment sits just right of the primary, nudged down slightly.
        card.text((text_location[0] + width + n_spaces, text_location[1] + height // 5),
                  text.split("|")[1], font=ImageFont.truetype(font_path, other_fntsz))
    return image
def create_broder(image, y):
    """Draw a thin translucent horizontal rule across the card at height y.

    (Name kept as-is — "broder" — so existing callers keep working.)
    """
    draw = ImageDraw.Draw(image)
    start, end = (0, y), (vc_size[0], y)  # full card width at the given height
    draw.line([start, end], fill=(255, 255, 255, 128), width=1)
    return image
def stamp_watermark(image, filepath_of_watermark):
    """Paste a shrunken watermark into the bottom-right corner of the card."""
    margin = 10  # gap between the card border and the watermark
    wm = Image.open(filepath_of_watermark).convert("RGBA")
    # Shrink to 1/15th of the original size (margin + 5 == 15).
    scaled = (wm.size[0] // (margin + 5), wm.size[1] // (margin + 5))
    wm = wm.resize(scaled)
    corner = (vc_size[0] - scaled[0] - margin,
              vc_size[1] - scaled[1] - margin)  # bottom-right corner
    # Use the watermark itself as the paste mask so its alpha is respected.
    image.paste(wm, corner, mask=wm)
    wm.close()
    return image
# Build the card: solid background, watermark, circular profile picture,
# then the text rows and a separator rule.
# NOTE(review): "<watermark-filepath>" is a template placeholder — replace
# it with a real path before running.
visual_card = Image.new("RGBA", vc_size, color)
visual_card = stamp_watermark(visual_card, "<watermark-filepath>")
profile_pic = Image.open(profile_pic_path)
profile_pic = crop_max_square(profile_pic).resize((profile_pic_size), Image.LANCZOS)
# In fn-call of `mask_circle_transparent`, increase 2nd arg to create blur effect at border
profile_pic = mask_circle_transparent(profile_pic, 0)
visual_card.paste(profile_pic, overlay_location, mask=profile_pic) # Overlay profile-pic on visual-card
# "username|handle" renders the name large and the handle small beside it.
visual_card = render_text(visual_card, f'{username}|{user_handle}', (uname_fntsz - 10, 10), uname_fntsz)
visual_card = render_text(visual_card, user_location, (uname_fntsz - 10, 35), other_fntsz)
visual_card = create_broder(visual_card, 60)
visual_card.show()
#visual_card.save("./visual_card.png")
|
[
"kesari.vangeepuram@gmail.com"
] |
kesari.vangeepuram@gmail.com
|
24628a937c4bb015580dcf7db437fbac6c5eb40d
|
13696a9691b173d75b11b4aee22b79d4ea6b7c0b
|
/test/test_order_line_item.py
|
0f2cd0135271ef7e6096299470663b97c0befed0
|
[
"Apache-2.0"
] |
permissive
|
square/connect-python-sdk
|
410613bc4b04f0f70176275591a16c9e49e25ede
|
e00e2889b2dd2c55048219cbe64db79962a68633
|
refs/heads/master
| 2023-06-15T09:24:17.190416
| 2019-08-15T17:44:41
| 2019-08-15T17:44:41
| 64,772,029
| 53
| 45
|
Apache-2.0
| 2020-12-20T18:41:31
| 2016-08-02T16:07:17
|
Python
|
UTF-8
|
Python
| false
| false
| 1,186
|
py
|
# coding: utf-8
"""
Copyright 2017 Square, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import squareconnect
from squareconnect.rest import ApiException
from squareconnect.models.order_line_item import OrderLineItem
class TestOrderLineItem(unittest.TestCase):
    """ OrderLineItem unit test stubs """

    def setUp(self):
        # Generated stub: no fixtures are required.
        pass

    def tearDown(self):
        pass

    def testOrderLineItem(self):
        """
        Test OrderLineItem
        """
        # Smoke test: constructing the generated model must not raise.
        model = squareconnect.models.order_line_item.OrderLineItem()

if __name__ == '__main__':
    unittest.main()
|
[
"noreply@github.com"
] |
square.noreply@github.com
|
1e616727b698fb933c3679722bfecdc53bf353af
|
2bdedcda705f6dcf45a1e9a090377f892bcb58bb
|
/src/main/output/kid/way_team_business/family/face_issue_city/number/year/health_kerberos_back.py
|
655af272c1deb6d1ca6522de2970d1ca8ae96cfa
|
[] |
no_license
|
matkosoric/GenericNameTesting
|
860a22af1098dda9ea9e24a1fc681bb728aa2d69
|
03f4a38229c28bc6d83258e5a84fce4b189d5f00
|
refs/heads/master
| 2021-01-08T22:35:20.022350
| 2020-02-21T11:28:21
| 2020-02-21T11:28:21
| 242,123,053
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,934
|
py
|
using System;
using System.Net.Http;
using System.Text;
using System.Threading.Tasks;
// Install Newtonsoft.Json with NuGet
using Newtonsoft.Json;
namespace translate_sample
{
    class Program
    {
        // Names of the environment variables that hold the API credentials.
        private const string key_var = "TRANSLATOR_TEXT_SUBSCRIPTION_KEY";
        // FIX: read the subscription key from the environment instead of
        // hard-coding the secret in source control. The previous hard-coded
        // value also made the null check in the static constructor dead code.
        private static readonly string subscriptionKey = Environment.GetEnvironmentVariable(key_var);
        private const string endpoint_var = "TRANSLATOR_TEXT_ENDPOINT";
        private static readonly string endpoint = Environment.GetEnvironmentVariable(endpoint_var);

        // Fail fast with an actionable message when configuration is missing.
        static Program()
        {
            if (null == subscriptionKey)
            {
                throw new Exception("Please set/export the environment variable: " + key_var);
            }
            if (null == endpoint)
            {
                throw new Exception("Please set/export the environment variable: " + endpoint_var);
            }
        }

        // Async call to the Translator Text API: serializes the input text,
        // POSTs it to endpoint + route, and prints the detected language and
        // every returned translation.
        static public async Task TranslateTextRequest(string subscriptionKey, string endpoint, string route, string inputText)
        {
            object[] body = new object[] { new { Text = inputText } };
            var requestBody = JsonConvert.SerializeObject(body);

            using (var client = new HttpClient())
            using (var request = new HttpRequestMessage())
            {
                // Build the request.
                request.Method = HttpMethod.Post;
                request.RequestUri = new Uri(endpoint + route);
                request.Content = new StringContent(requestBody, Encoding.UTF8, "application/json");
                // FIX: the Translator API expects the key in the
                // "Ocp-Apim-Subscription-Key" header; the previous literal
                // was a garbled placeholder header name.
                request.Headers.Add("Ocp-Apim-Subscription-Key", subscriptionKey);

                // Send the request and read the response body as a string.
                HttpResponseMessage response = await client.SendAsync(request).ConfigureAwait(false);
                string result = await response.Content.ReadAsStringAsync();

                // Deserialize into the TranslationResult classes declared elsewhere.
                TranslationResult[] deserializedOutput = JsonConvert.DeserializeObject<TranslationResult[]>(result);
                foreach (TranslationResult o in deserializedOutput)
                {
                    // Print the detected input language and confidence score.
                    Console.WriteLine("Detected input language: {0}\nConfidence score: {1}\n", o.DetectedLanguage.Language, o.DetectedLanguage.Score);
                    // Print each translation.
                    foreach (Translation t in o.Translations)
                    {
                        Console.WriteLine("Translated to {0}: {1}", t.To, t.Text);
                    }
                }
            }
        }

        // Entry point: prompts for a phrase and translates it to de/it/ja/th.
        // Requires C# 7.1 or later for async Main.
        static async Task Main(string[] args)
        {
            // Output languages are defined in the route. For a complete list, see
            // https://docs.microsoft.com/azure/cognitive-services/translator/reference/v3-0-translate
            string route = "/translate?api-version=3.0&to=de&to=it&to=ja&to=th";
            Console.Write("Type the phrase you'd like to translate? ");
            string textToTranslate = Console.ReadLine();
            await TranslateTextRequest(subscriptionKey, endpoint, route, textToTranslate);
            Console.WriteLine("Press any key to continue.");
            Console.ReadKey();
        }
    }
}
|
[
"soric.matko@gmail.com"
] |
soric.matko@gmail.com
|
91c25e9e1439da3790676816b093c0f9a27f9de5
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03559/s351538346.py
|
07b30fc56f836903884b21d2c40f39a3645029b7
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 586
|
py
|
import bisect

# Count triples (x, y, z) — x from the first list, y from the second,
# z from the third — such that x < y < z (AtCoder ABC077 C style).
n = int(input().strip())
lower = list(map(int, input().split()))
middle = list(map(int, input().split()))
upper = list(map(int, input().split()))

# Only the outer lists need sorting; the middle list is scanned as-is.
lower.sort()
upper.sort()

total = 0
for y in middle:
    # Strictly smaller elements in `lower`, strictly larger in `upper`.
    smaller = bisect.bisect_left(lower, y)
    larger = len(upper) - bisect.bisect_right(upper, y)
    total += smaller * larger
print(total)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
6b0f05b24305838a791d7539f7b5f6e7fa6c8395
|
850d778687e3692ab2a38d4d2227391d92c21e6b
|
/atcoder.jp/arc008/arc008_4/Main.py
|
1bc7f5184d2c991ab49a12ce1a26ad20d78090fc
|
[] |
no_license
|
Valkyrja3607/AtCoder
|
77e2e5e66c0e8e12bb902c35f679119c6576fad7
|
9218a50b1eb83e4498845d15d9dda41fab90ed73
|
refs/heads/master
| 2023-07-15T20:38:52.911301
| 2018-05-30T17:56:22
| 2018-05-30T17:56:22
| 294,980,006
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 681
|
py
|
# Read n (unused afterwards) and m, then m triples (p, a, b): the event at
# "key" p sets one slot to the affine map x -> a*x + b.
n,m=[int(j) for j in input().split()]
pab=[[float(j) for j in input().split()] for i in range(m)]
ll=[]
for p,a,b in pab:
    ll.append(p)
ll.sort()
from collections import Counter
# Map each key p to its rank in sorted order = its leaf index in the tree.
# NOTE(review): Counter is used as a plain dict; duplicate p values would
# share one leaf — presumably the keys are distinct. Verify against input.
l=Counter()
for i in range(m):
    l[ll[i]]=i
# Change slot k to the map x -> x*x + y, O(log N).
# (translated from the original Japanese comment: "change A[k] to x")
def update(k,x,y):
    k += num
    seg_min[k] = x
    seg_max[k] = y
    while k>1:
        k //= 2
        # Compose child maps, left applied first: for f=(af,bf), g=(ag,bg),
        # g(f(x)) has coefficient af*ag and constant bf*ag + bg.
        seg_min[k] = seg_min[k*2]*seg_min[k*2+1]
        seg_max[k] = seg_max[k*2]*seg_min[k*2+1]+seg_max[k*2+1]
# Number of leaves: smallest power of two >= m.
num=2**(m-1).bit_length()
# seg_min holds the multiplicative part (identity 1) and seg_max the
# additive part (identity 0) — despite the min/max names.
seg_min=[1]*2*num
seg_max=[0]*2*num
ans1=1
ans2=1
for p,a,b in pab:
    update(l[p],a,b)
    # Root composition applied to x = 1: coefficient + constant.
    t=seg_min[1]+seg_max[1]
    ans1=min(ans1,t)
    ans2=max(ans2,t)
print(ans1)
print(ans2)
|
[
"purinjolly@gmail.com"
] |
purinjolly@gmail.com
|
925db904b3e3bce5fd3d07069328ae9b575f7401
|
1e99d202f94d26d8af5405a8c8284a5ffc345bba
|
/user/models.py
|
09b73ff470a86207cf8f06d317e689aca1d5b450
|
[] |
no_license
|
madlad33/drf_pythonmate
|
889b6a057ab9ac60b1e1138c2eb4ebc96d873e7c
|
0b47ed41e847b0e0a7920e008867cdf971bddd6c
|
refs/heads/master
| 2023-02-18T09:02:35.313419
| 2021-01-11T17:03:19
| 2021-01-11T17:03:19
| 328,583,038
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,088
|
py
|
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.contrib.auth.base_user import BaseUserManager
# Create your models here.
class UserManager(BaseUserManager):
    """Manager for a user model that uses email as the unique identifier."""

    def create_user(self, email, password=None, **extrafields):
        """Create and save a regular user.

        Raises:
            ValueError: if no email is supplied.
        """
        if not email:
            raise ValueError("Email is a required field")
        user = self.model(email=self.normalize_email(email), **extrafields)
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_superuser(self, email, password, **extrafields):
        """Create and save a superuser.

        Fix: forward **extrafields to create_user (previously dropped) and
        set the staff/superuser flags before the first save, so the row is
        written once with its final values instead of saved twice.
        """
        extrafields.setdefault('is_staff', True)
        extrafields.setdefault('is_superuser', True)
        return self.create_user(email, password, **extrafields)
class CustomUser(AbstractUser):
    """User model that authenticates with email instead of a username."""

    # Remove the inherited username column entirely.
    username = None
    email = models.EmailField(max_length=255, unique=True)

    USERNAME_FIELD = 'email'
    objects = UserManager()
    # Fix: per the Django docs, REQUIRED_FIELDS must not contain the
    # USERNAME_FIELD or the password — createsuperuser always prompts for
    # both. Listing 'password' here was incorrect.
    REQUIRED_FIELDS = []

    def __str__(self):
        return self.email
class Client(models.Model):
    # One-to-one profile record attached to a CustomUser; deleting the user
    # cascades to this row.
    user = models.OneToOneField(CustomUser,on_delete=models.CASCADE)
|
[
"tanmay.milky33@gmail.com"
] |
tanmay.milky33@gmail.com
|
36b8165874527ec6b6a038f2526a3b40284cad6c
|
80075edf813fa1c7ef3126b153e7ab2f6c42f0be
|
/xml/Reading_XML_File_From_Python_Code.py
|
992076a09487b050d0949ebe21ed811ab5e7f2c2
|
[] |
no_license
|
keshavkummari/python-nit-930pm
|
a7e16701d981145f8fdc27e741169ef76616bc8a
|
aa3bb7654c091e3d04098483525768e287604c38
|
refs/heads/master
| 2020-04-01T15:00:20.366890
| 2018-11-29T17:14:41
| 2018-11-29T17:14:41
| 153,316,831
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,404
|
py
|
# A simple XML file, later parse it with Python minidom.
'''
staff.xml
<?xml version="1.0"?>
<company>
<name>Mkyong Enterprise</name>
<staff id="1001">
<nickname>mkyong</nickname>
<salary>100,000</salary>
</staff>
<staff id="1002">
<nickname>yflow</nickname>
<salary>200,000</salary>
</staff>
<staff id="1003">
<nickname>alex</nickname>
<salary>20,000</salary>
</staff>
</company>
'''
"""
<?xml version="1.0"?>
<company>
<name>Online Ucator</name>
<staff id="1001">
<nickname>Minnu</nickname>
<salary>100,000</salary>
</staff>
<staff id="1002">
<nickname>Keshav</nickname>
<salary>200,000</salary>
</staff>
<staff id="1003">
<nickname>Jessi</nickname>
<salary>20,000</salary>
</staff>
</company>
"""
#2. DOM Example 1
#A simple Python minidom example.
# dom-example.py
from xml.dom import minidom

# Parse the sample document, then print the company name followed by one
# line per <staff> entry.
dom = minidom.parse("staff.xml")

# getElementsByTagName returns a NodeList; the first <name> is the company's.
company_name = dom.getElementsByTagName("name")[0]
print(company_name.firstChild.data)

for staff in dom.getElementsByTagName("staff"):
    sid = staff.getAttribute("id")
    nick = staff.getElementsByTagName("nickname")[0].firstChild.data
    pay = staff.getElementsByTagName("salary")[0].firstChild.data
    print("id:%s, nickname:%s, salary:%s" % (sid, nick, pay))
|
[
"keshav.kummari@gmail.com"
] |
keshav.kummari@gmail.com
|
9a5c1c8d4f358589a5a518cc0e191b06f084541c
|
a2e638cd0c124254e67963bda62c21351881ee75
|
/Extensions/Accounting/FPythonCode/FAccountingRollForward.py
|
aadcf523a93669d6ecde20daab2b4b7f22aa7ead
|
[] |
no_license
|
webclinic017/fa-absa-py3
|
1ffa98f2bd72d541166fdaac421d3c84147a4e01
|
5e7cc7de3495145501ca53deb9efee2233ab7e1c
|
refs/heads/main
| 2023-04-19T10:41:21.273030
| 2021-05-10T08:50:05
| 2021-05-10T08:50:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,284
|
py
|
""" Compiled: 2020-09-18 10:38:46 """
#__src_file__ = "extensions/accounting/etc/FAccountingRollForward.py"
# operations
from FOperationsCollectionUtils import PopObject
# accounting
from FAccountingEngineEOFY import IAccountingEOFYEngine
from FAccountingCreation import CreateRollForwardJournals
from FAccountingPairReverser import PerformCancellation
from FAccountingReader import ReadRollForwardPairs
from FAccountingDRCRPairGenerator import GenerateDRCRPairs
from FAccountingCalculations import IsAmountZero
#-------------------------------------------------------------------------
class BalanceRollForward(IAccountingEOFYEngine.IRollForwardProvider):
    """End-of-fiscal-year roll-forward of ledger balances.

    Collects balances flagged for roll-forward, then books (and, where an
    earlier roll-forward pair already exists, cancels and re-books) DR/CR
    journal pairs on each account's roll-forward T-account for the new
    period via the injected provider.
    """
    #-------------------------------------------------------------------------
    def __init__(self, fiscalYear):
        # NOTE(review): fiscalYear is accepted but not stored — confirm it is
        # only needed later, in RFP_RollForward.
        self.__provider = None
        self.__toRollForwardBalances = dict()
    #-------------------------------------------------------------------------
    def PO_Init(self, provider):
        # Engine callback: late injection of the accounting provider.
        self.__provider = provider
    #-------------------------------------------------------------------------
    def PO_Clear(self):
        # Drop every balance collected for the current run.
        self.__toRollForwardBalances.clear()
    #-------------------------------------------------------------------------
    def RFP_IsValidForRollForward(self, balance):
        # A balance qualifies when its account has an active roll-forward
        # T-account and either the amount or the base amount is non-zero.
        return balance.ChartOfAccount().HasActiveRollForwardTAccount() and \
               (not IsAmountZero(balance.Amount()) or not IsAmountZero(balance.BaseAmount()))
    #-------------------------------------------------------------------------
    def RFP_AddForRollForward(self, key, balance):
        # Register a balance (keyed by the caller) to be rolled forward.
        self.__toRollForwardBalances[key] = balance
    #-------------------------------------------------------------------------
    def RFP_RollForward(self, book, fiscalYear, endPeriod, keyFunc):
        """Roll every registered balance forward into endPeriod.

        Existing roll-forward pairs for the fiscal year are matched by key;
        a matched old pair is cancelled and replaced, an unmatched balance
        just gets a fresh pair.
        """
        accountMapper = self.__provider.LKMP_TAccountLedgerKeyMapper()
        oldPairs = dict((self.__FindKey(pair, keyFunc), pair) for pair in ReadRollForwardPairs(book, fiscalYear))
        for key, balance in self.__toRollForwardBalances.items():
            oldPair = PopObject(oldPairs, key)
            rollforwardAmount, rollforwardBaseAmount = self.__CalculateRollForwardAmount(balance, oldPair)
            # GenerateDRCRPairs yields; we only need the single pair built
            # from the freshly created roll-forward journals.
            newPair = next(GenerateDRCRPairs(CreateRollForwardJournals(rollforwardAmount, rollforwardBaseAmount, balance, endPeriod, accountMapper), True))
            self.__ProcessPairs(oldPair, newPair, keyFunc, balance.AccountingPeriod())
    #-------------------------------------------------------------------------
    def __FindKey(self, pair, keyFunc):
        # Key a pair by its journal on the roll-forward T-account (if any).
        for journal in pair.Journals():
            if journal.Account().RollForwardTAccount():
                return keyFunc(journal)
        return None
    #-------------------------------------------------------------------------
    def __CalculateRollForwardAmount(self, balance, oldPair):
        # Start from the full balance and net off whatever an earlier
        # roll-forward pair already booked against the same original balance.
        rollforwardAmount = balance.Amount()
        rollforwardBaseAmount = balance.BaseAmount()
        if oldPair:
            for journal in oldPair.Journals():
                if journal.Balance() == balance.Original():
                    rollforwardAmount -= journal.Amount()
                    rollforwardBaseAmount -= journal.BaseAmount()
        return rollforwardAmount, rollforwardBaseAmount
    #-------------------------------------------------------------------------
    def __ProcessPairs(self, oldPair, newPair, keyFunc, startPeriod):
        # Replacing an old pair: cancel it and store the cancellation
        # journals together with the new pair as one connected update.
        if newPair and oldPair:
            connectedPairs = [pair for pair in PerformCancellation(oldPair, None, None)]
            connectedPairs.append(newPair)
            for pair in connectedPairs:
                self.__SetBalanceRef(pair, keyFunc, startPeriod)
            self.__provider.STPUP_AddConnectedPairsForUpdate(connectedPairs)
        elif newPair:
            self.__SetBalanceRef(newPair, keyFunc, startPeriod)
            self.__provider.STPUP_AddPairForUpdate(newPair)
    #-------------------------------------------------------------------------
    def __SetBalanceRef(self, pair, keyFunc, startPeriod):
        # Attach each journal to the (possibly freshly created) balance
        # identified by its key, so the pair updates the right balances.
        for journal in pair.Journals():
            key = keyFunc(journal)
            balanceForKey = self.__provider.BC_GetOrCreateBalanceForKey(key, journal, startPeriod)
            journal.Balance(balanceForKey)
|
[
"nencho.georogiev@absa.africa"
] |
nencho.georogiev@absa.africa
|
bab242cced1e1ad5251f1876544fa92f2c8f4c73
|
8ee5dcbdbd407eb5f294d430813b16eca22f571c
|
/data/HW3/hw3_359.py
|
802a68fe38d1f46796648e9870bd99992298710a
|
[] |
no_license
|
MAPLE-Robot-Subgoaling/IPT
|
5e60e2ee4d0a5688bc8711ceed953e76cd2ad5d9
|
f512ea3324bfdceff8df63b4c7134b5fcbb0514e
|
refs/heads/master
| 2021-01-11T12:31:00.939051
| 2018-08-13T23:24:19
| 2018-08-13T23:24:19
| 79,373,489
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,070
|
py
|
# Physical state boundaries for water in each supported scale.
# Fix: the Celsius melting point was 32 (the Fahrenheit value), which made
# the Celsius "solid" branch (0 <= temp < 32) physically wrong.
MELTING_POINT_C = 0
BOILING_POINT_C = 100
MELTING_POINT_K = 273.15
BOILING_POINT_K = 373.15


def classify_water_state(temp, scale):
    """Return a sentence describing water's state at temperature *temp*.

    scale: 'C' selects Celsius thresholds; any other value is treated as
    Kelvin, matching the original script's if/else behavior.
    """
    if scale == "C":
        melting, boiling = MELTING_POINT_C, BOILING_POINT_C
    else:
        melting, boiling = MELTING_POINT_K, BOILING_POINT_K
    # Fix: temperatures below the melting point are solid too — the old
    # `temp >= 0 and temp < melting` guard printed nothing for them.
    if temp < melting:
        return "At this temperature, water is a (frozen) solid."
    if temp < boiling:
        return "At this temperature, water is a liquid."
    return "At this temperature, water is a gas."


def main():
    """Prompt for a temperature and scale, then report water's state."""
    temp = float(input("Please enter the temperature: "))
    scale = input("Please enter 'C' for Celsius, or 'K' for Kelvin: ")
    print(classify_water_state(temp, scale))


if __name__ == "__main__":
    # Guarded entry point so importing the module no longer blocks on input().
    main()
|
[
"mneary1@umbc.edu"
] |
mneary1@umbc.edu
|
bd3f59a9d11388780a80aad702971349799580c5
|
0320ac4a623f9153468952a64af9093430801dcb
|
/tests/callbacks/learning_rate_test.py
|
9503c866bcca0a3f3d20446c89f8d9a9d3d4676a
|
[
"MIT"
] |
permissive
|
carmocca/PyLaia
|
330629610569f9347de5cb3eb479c2ed5abaceb6
|
65b0dde6211f96d061ce6264e50ba316e8f0e7f3
|
refs/heads/master
| 2023-02-25T06:23:51.755052
| 2021-01-24T13:16:48
| 2021-01-24T13:16:48
| 277,486,482
| 1
| 1
|
MIT
| 2020-12-02T03:08:13
| 2020-07-06T08:32:49
|
Python
|
UTF-8
|
Python
| false
| false
| 1,556
|
py
|
import pytest
import torch
from laia.callbacks import LearningRate
from laia.dummies import DummyEngine, DummyLoggingPlugin, DummyMNIST, DummyTrainer
def test_learning_rate_warns(tmpdir):
    """Fitting with the LearningRate callback should emit the monitor warning."""
    engine, data = DummyEngine(), DummyMNIST()
    trainer = DummyTrainer(
        default_root_dir=tmpdir,
        max_epochs=1,
        callbacks=[LearningRate()],
    )
    with pytest.warns(RuntimeWarning, match=r"You are using LearningRateMonitor.*"):
        trainer.fit(engine, datamodule=data)
class __TestEngine(DummyEngine):
    # Engine variant that attaches a StepLR scheduler (step size 1) so the
    # learning rate changes every epoch.
    def configure_optimizers(self):
        optimizer = super().configure_optimizers()
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1)
        return [optimizer], [scheduler]
@pytest.mark.parametrize("num_processes", (1, 2))
def test_learning_rate(tmpdir, num_processes):
    """Each epoch's LR change is logged exactly once, and only on rank 0."""
    log_file = tmpdir / "log"
    use_ddp = num_processes > 1
    trainer = DummyTrainer(
        default_root_dir=tmpdir,
        max_epochs=3,
        callbacks=[LearningRate()],
        accelerator="ddp_cpu" if use_ddp else None,
        num_processes=num_processes,
        plugins=[DummyLoggingPlugin(log_file)],
    )
    trainer.fit(__TestEngine(), datamodule=DummyMNIST())

    if use_ddp:
        # Non-zero ranks create their own log file but must write nothing.
        rank1_log = tmpdir.join("log.rank1")
        assert rank1_log.exists()
        assert not rank1_log.read_text("utf-8")

    assert log_file.exists()
    lines = [line.strip() for line in log_file.readlines()]
    # StepLR(step=1) decays the LR each epoch; expect one old ⟶ new line
    # per epoch after the first.
    for epoch in range(1, trainer.max_epochs):
        expected = f"E{epoch}: lr-Adam 1.000e-0{epoch + 2} ⟶ 1.000e-0{epoch + 3}"
        assert lines.count(expected) == 1
|
[
"carlossmocholi@gmail.com"
] |
carlossmocholi@gmail.com
|
c32161bd88210e1a6c87cb5395adf9e602d68732
|
61aa319732d3fa7912e28f5ff7768498f8dda005
|
/src/cpu/testers/gpu_ruby_test/ProtocolTester.py
|
cf24aec71ce40a0c2c4589ca6fb05c77686a5dd2
|
[
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.0-or-later",
"MIT"
] |
permissive
|
TeCSAR-UNCC/gem5-SALAM
|
37f2f7198c93b4c18452550df48c1a2ab14b14fb
|
c14c39235f4e376e64dc68b81bd2447e8a47ff65
|
refs/heads/main
| 2023-06-08T22:16:25.260792
| 2023-05-31T16:43:46
| 2023-05-31T16:43:46
| 154,335,724
| 62
| 22
|
BSD-3-Clause
| 2023-05-31T16:43:48
| 2018-10-23T13:45:44
|
C++
|
UTF-8
|
Python
| false
| false
| 3,587
|
py
|
# Copyright (c) 2017-2021 Advanced Micro Devices, Inc.
# All rights reserved.
#
# For use for simulation and test purposes only
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from m5.objects.ClockedObject import ClockedObject
from m5.params import *
from m5.proxy import *
class ProtocolTester(ClockedObject):
    """SimObject parameter declarations for the GPU Ruby protocol tester."""
    type = 'ProtocolTester'
    cxx_header = "cpu/testers/gpu_ruby_test/protocol_tester.hh"
    cxx_class = 'gem5::ProtocolTester'

    # Request ports the tester drives into the memory system.
    cpu_ports = VectorRequestPort("Ports for CPUs")
    dma_ports = VectorRequestPort("Ports for DMAs")
    cu_vector_ports = VectorRequestPort("Vector ports for GPUs")
    cu_sqc_ports = VectorRequestPort("SQC ports for GPUs")
    cu_scalar_ports = VectorRequestPort("Scalar ports for GPUs")
    cu_token_ports = VectorRequestPort("Token ports for GPU")

    # Ratios of compute units to shared instruction/scalar caches.
    cus_per_sqc = Param.Int(4, "Number of CUs per SQC")
    cus_per_scalar = Param.Int(4, "Number of CUs per scalar cache")

    # Shape of the simulated GPU workload.
    wavefronts_per_cu = Param.Int(1, "Number of wavefronts per CU")
    workitems_per_wavefront = Param.Int(64, "Number of workitems per wf")
    max_cu_tokens = Param.Int(4, "Maximum number of tokens, i.e., the number"
                                 " of instructions that can be uncoalesced"
                                 " before back-pressure occurs from the"
                                 " coalescer.")

    # Thread/wavefront objects the tester schedules.
    cpu_threads = VectorParam.CpuThread("All cpus")
    dma_threads = VectorParam.DmaThread("All DMAs")
    wavefronts = VectorParam.GpuWavefront("All wavefronts")

    # Address-space layout used to generate contended/atomic accesses.
    num_atomic_locations = Param.Int(2, "Number of atomic locations")
    num_normal_locs_per_atomic = Param.Int(1000, \
        "Number of normal locations per atomic")

    # Episode control: each thread runs up to max_num_episodes episodes of
    # episode_length actions each.
    episode_length = Param.Int(10, "Number of actions per episode")
    max_num_episodes = Param.Int(20, "Maximum number of episodes")
    debug_tester = Param.Bool(False, "Are we debugging the tester?")
    random_seed = Param.Int(0, "Random seed number. Default value (0) means \
                    using runtime-specific value.")
    log_file = Param.String("Log file's name")
    system = Param.System(Parent.any, "System we belong to")
|
[
"sroger48@uncc.edu"
] |
sroger48@uncc.edu
|
2678427304e86a98502f35d1db2967dda840a57b
|
10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94
|
/Python/can-you-eat-your-favorite-candy-on-your-favorite-day.py
|
bdb18cca50672369c8859fc0b27ba4afe75dc6ed
|
[
"MIT"
] |
permissive
|
kamyu104/LeetCode-Solutions
|
f54822059405ef4df737d2e9898b024f051fd525
|
4dc4e6642dc92f1983c13564cc0fd99917cab358
|
refs/heads/master
| 2023-09-02T13:48:26.830566
| 2023-08-28T10:11:12
| 2023-08-28T10:11:12
| 152,631,182
| 4,549
| 1,651
|
MIT
| 2023-05-31T06:10:33
| 2018-10-11T17:38:35
|
C++
|
UTF-8
|
Python
| false
| false
| 429
|
py
|
# Time: O(n)
# Space: O(n)
class Solution(object):
    def canEat(self, candiesCount, queries):
        """
        :type candiesCount: List[int]
        :type queries: List[List[int]]
        :rtype: List[bool]
        """
        # prefix[i] = total candies of types 0 .. i-1.
        prefix = [0]
        for count in candiesCount:
            prefix.append(prefix[-1] + count)
        answer = []
        for fav_type, fav_day, cap in queries:
            # Eating at most `cap` per day must not finish all earlier types
            # before day fav_day (0-indexed), and eating at least one per day
            # must still leave candy of fav_type on that day.
            earliest_day = prefix[fav_type] // cap + 1
            latest_day = prefix[fav_type + 1]
            answer.append(earliest_day <= fav_day + 1 <= latest_day)
        return answer
|
[
"noreply@github.com"
] |
kamyu104.noreply@github.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.