blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3dd4730257d2ce9162b298d0a83a83683ab1fdb6 | 4900fcc64f66590068ba2197714b8ac4d2bc00fc | /posts/migrations/0024_subcat.py | 64427f1bc083f827dd955d7976162f7fda01079a | [] | no_license | Titania1/e-learning-platforme | ccea984afd1bc6407d9fd89369b17b47a3203f9a | 798633e16c8aab4a6b4ea66b1231f90b92d99cff | refs/heads/main | 2023-06-20T07:26:55.149463 | 2021-07-18T10:41:42 | 2021-07-18T10:41:42 | 387,149,909 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 887 | py | # Generated by Django 3.1.4 on 2021-01-04 11:56
import autoslug.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('posts', '0023_auto_20210104_1057'),
]
operations = [
migrations.CreateModel(
name='subcat',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('slug', autoslug.fields.AutoSlugField(editable=False, populate_from='title', unique=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='posts.category')),
],
),
]
| [
"shivamrohillaa@gmail.com"
] | shivamrohillaa@gmail.com |
55f99b5bd75bdb54382219c2c32332676bbf0b37 | 4ad7b285be90bf5e1dad8d81f741fe177c56dbf4 | /whelk/tests/test_basic.py | 94414511153074c958485a3408f29dc618dae43b | [
"LicenseRef-scancode-warranty-disclaimer",
"Zlib"
] | permissive | git-spindle-test-1/whelk | 1be65e23ca79203bc714a9b22a236f52d89ed13b | 03406c86a4435b698b9144ce674c8c8a994d8b5e | refs/heads/master | 2021-01-21T12:43:26.481368 | 2015-12-14T15:08:44 | 2015-12-18T18:30:52 | 56,990,062 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,365 | py | from whelk.tests import *
class BasicTest(unittest.TestCase):
"""Tests whether we can find commands"""
def test_notfound(self):
# Non-existing command
self.assertRaises(AttributeError, lambda: shell.i_do_not_exist)
self.assertRaises(KeyError, lambda: shell['/not/found'])
def test_basic(self):
# Basic command test
r = shell.test_return('0', 'output')
self.assertEqual(r.returncode, 0)
self.assertEqual(r.stdout, b('output\n'))
self.assertEqual(r.stderr, b(''))
r = shell.test_return('22', 'stdout', 'stderr')
self.assertEqual(r.returncode, 22)
self.assertEqual(r.stdout, b('stdout\n'))
self.assertEqual(r.stderr, b('stderr\n'))
def test_underscores(self):
# Underscore-replacement
c = shell.test_dashes
self.assertTrue('test-dashes' in c.name)
r = c(0)
self.assertEqual(r.returncode, 0)
self.assertEqual(r.stderr, b(''))
self.assertEqual(r.stdout, b(''))
def test_exceptions(self):
self.assertRaises(CommandFailed, lambda: shell.false(raise_on_error=True))
def test_defaults(self):
s = Shell(stdout = shell.STDOUT)
input = b("Testing 1 2 3")
r = s.cat(input=input)
self.assertEqual(r.returncode, 0)
self.assertEqual(r.stdout, input)
| [
"dennis@kaarsemaker.net"
] | dennis@kaarsemaker.net |
d2c17457e9ba693684064ebe430d7e92251f9529 | c97536dc1d63e5ab99a3c494cdbd7c329e654efd | /api/client/test/test_pipeline_service_api.py | 01a580101c9ebd2fa52b9387194901e9d484f82c | [
"Apache-2.0"
] | permissive | sabary661990615/mlx | 9958180c42e00b32498b572098789046c927aa0c | 7684155c074f1bd1d16ab183598ce6d19414267b | refs/heads/main | 2023-07-14T12:45:35.408653 | 2021-08-20T21:34:17 | 2021-08-20T21:34:17 | 394,724,026 | 0 | 0 | Apache-2.0 | 2021-08-10T17:11:49 | 2021-08-10T17:11:48 | null | UTF-8 | Python | false | false | 2,377 | py | # Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
MLX API
MLX API Extension for Kubeflow Pipelines # noqa: E501
OpenAPI spec version: 0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.api.pipeline_service_api import PipelineServiceApi # noqa: E501
from swagger_client.rest import ApiException
class TestPipelineServiceApi(unittest.TestCase):
"""PipelineServiceApi unit test stubs"""
def setUp(self):
self.api = swagger_client.api.pipeline_service_api.PipelineServiceApi() # noqa: E501
def tearDown(self):
pass
def test_approve_pipelines_for_publishing(self):
"""Test case for approve_pipelines_for_publishing
"""
pass
def test_create_pipeline(self):
"""Test case for create_pipeline
"""
pass
def test_delete_pipeline(self):
"""Test case for delete_pipeline
"""
pass
def test_download_pipeline_files(self):
"""Test case for download_pipeline_files
Returns the pipeline YAML compressed into a .tgz (.tar.gz) file. # noqa: E501
"""
pass
def test_get_pipeline(self):
"""Test case for get_pipeline
"""
pass
def test_get_template(self):
"""Test case for get_template
"""
pass
def test_list_pipelines(self):
"""Test case for list_pipelines
"""
pass
def test_set_featured_pipelines(self):
"""Test case for set_featured_pipelines
"""
pass
def test_upload_pipeline(self):
"""Test case for upload_pipeline
"""
pass
if __name__ == '__main__':
unittest.main()
| [
"82406273+mlx-bot@users.noreply.github.com"
] | 82406273+mlx-bot@users.noreply.github.com |
0b07d00ae2dbeac0f4c27afa2a0627c8cecf4ce3 | 264ff719d21f2f57451f322e9296b2f55b473eb2 | /tools/nntool/reports/activation_reporter.py | 00b3388c2cf0163e0f28e97977614afe6899ef17 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | knmcguire/gap_sdk | 06c9537c16fa45dea6b7f5c6b162b53953262915 | 7b0a09a353ab6f0550793d40bd46e98051f4a3d7 | refs/heads/master | 2020-12-20T06:51:19.580497 | 2020-01-21T14:52:28 | 2020-01-21T14:52:28 | 235,992,961 | 0 | 0 | Apache-2.0 | 2020-01-24T11:45:59 | 2020-01-24T11:45:58 | null | UTF-8 | Python | false | false | 3,693 | py | # Copyright (C) 2019 GreenWaves Technologies
# All rights reserved.
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
import copy
from collections import OrderedDict
from graph.nngraph import NNGraph
from utils.stats_funcs import STATS_BITS, astats, calculate_qsnrs
from utils.tabular import Tabular, TabularColumn
from utils.node_id import NodeId
from .reporter import Reporter
def gather_stats(activation, force_ideal=False):
stat = astats(activation)
stat['qstats'] = calculate_qsnrs(activation, stat['ibits'], force_ideal)
return stat
def appendn(row, rep, val=""):
for _ in range(rep):
row.append(val)
def do_header(table):
header = [
TabularColumn("name"),
TabularColumn("mean", fmt=">.3f"),
TabularColumn("std dev", fmt=">.3f"),
TabularColumn("min", fmt=">.3f"),
TabularColumn("max", fmt=">.3f"),
TabularColumn("min acc", fmt=">.3f"),
TabularColumn("max acc", fmt=">.3f"),
TabularColumn("int\nbits", fmt=">d"),
]
for bit_size in STATS_BITS:
header.append(TabularColumn("{}-bits\nformat".format(bit_size), fmt="^s"))
header.append(TabularColumn("{}-bits\nQSNR".format(bit_size), fmt=">.0f"))
header.append(TabularColumn("size\n(bytes)", fmt=">d"))
table.add_row(header)
def do_row(table, node_name, stat, threshold, total):
row = [node_name, stat['mean'], stat['std'],
stat['min'], stat['max']]
if 'min_acc' in stat:
row.append(stat['min_acc'])
row.append(stat['max_acc'])
else:
row.append("")
row.append("")
row.append(stat['ibits'])
if 'qstats' not in stat:
appendn(row, len(STATS_BITS) * 2 + 1)
table.add_row(row)
return total
qstats = stat['qstats']
sel = None
for bit_size in STATS_BITS:
if bit_size in qstats:
qstat = qstats[bit_size]
if (sel is None or sel > bit_size) and qstat['qsnr'] > threshold:
sel = bit_size
row.append("Q{}".format(qstat['q']))
row.append(qstat['qsnr'])
else:
appendn(row, 2)
if sel is not None:
size = stat['size']*sel//8
total += size
row.append(size)
else:
row.append("")
table.add_row(row)
return total
def do_rows(stats, table, threshold):
total = 0
for node_name, stat in stats.items():
total = do_row(table, node_name, stat, threshold, total)
return total
def do_total(table, total):
total_row = ["TOTAL"]
appendn(total_row, 7 + len(STATS_BITS) * 2)
total_row.append(total)
table.add_row(total_row)
def dump_stats_table(stats, do_totals=True, threshold=30):
table = Tabular()
do_header(table)
total = do_rows(stats, table, threshold)
if do_totals:
do_total(table, total)
return table
class ActivationReporter(Reporter):
def __init__(self, do_totals=True, threshold=30.0, yield_fusions=False):
self._do_totals = do_totals
self._threshold = threshold
self._yield_fusions = yield_fusions
def report(self, G: NNGraph, stats):
dump_stats = OrderedDict()
for _, node, fusion_idx, fnode in G.nodes_iterator(self._yield_fusions):
stat = stats[NodeId(node, fnode)]
stat = copy.deepcopy(stat)
if fusion_idx:
name = "{}_{}".format(node.name, fusion_idx)
else:
name = node.name
dump_stats[name] = stat
return dump_stats_table(dump_stats, do_totals=self._do_totals, threshold=self._threshold)
| [
"noreply@github.com"
] | knmcguire.noreply@github.com |
6b57348d5f5ad826051c302ab6dd0b359e0c9756 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/gigasecond/442744b90ea54dde9d89774dcc794c00.py | cac1e2d437346165b5cedc399ecfb21e3134ec27 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 167 | py | from datetime import date, timedelta
gigaseconds = timedelta(seconds=10**9)
def add_gigasecond(birthdate):
return birthdate + gigaseconds
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
5102a36c3d40740383d2bf17d4d414eaded9c386 | 870639af1487cf59b548f56c9cd1a45928c1e2c2 | /tests/components/knx/test_services.py | c61dc54258630841851c356535c813aae14a0971 | [
"Apache-2.0"
] | permissive | atmurray/home-assistant | 9f050944d26c084f8f21e8612a7b90c0ae909763 | 133cb2c3b0e782f063c8a30de4ff55a5c14b9b03 | refs/heads/dev | 2023-03-19T04:26:40.743852 | 2021-11-27T05:58:25 | 2021-11-27T05:58:25 | 234,724,430 | 2 | 0 | Apache-2.0 | 2023-02-22T06:18:36 | 2020-01-18T11:27:02 | Python | UTF-8 | Python | false | false | 5,995 | py | """Test KNX services."""
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.core import HomeAssistant
from .conftest import KNXTestKit
from tests.common import async_capture_events
async def test_send(hass: HomeAssistant, knx: KNXTestKit):
"""Test `knx.send` service."""
test_address = "1/2/3"
await knx.setup_integration({})
# send DPT 1 telegram
await hass.services.async_call(
"knx", "send", {"address": test_address, "payload": True}, blocking=True
)
await knx.assert_write(test_address, True)
# send raw DPT 5 telegram
await hass.services.async_call(
"knx", "send", {"address": test_address, "payload": [99]}, blocking=True
)
await knx.assert_write(test_address, (99,))
# send "percent" DPT 5 telegram
await hass.services.async_call(
"knx",
"send",
{"address": test_address, "payload": 99, "type": "percent"},
blocking=True,
)
await knx.assert_write(test_address, (0xFC,))
# send "temperature" DPT 9 telegram
await hass.services.async_call(
"knx",
"send",
{"address": test_address, "payload": 21.0, "type": "temperature"},
blocking=True,
)
await knx.assert_write(test_address, (0x0C, 0x1A))
# send multiple telegrams
await hass.services.async_call(
"knx",
"send",
{"address": [test_address, "2/2/2", "3/3/3"], "payload": 99, "type": "percent"},
blocking=True,
)
await knx.assert_write(test_address, (0xFC,))
await knx.assert_write("2/2/2", (0xFC,))
await knx.assert_write("3/3/3", (0xFC,))
async def test_read(hass: HomeAssistant, knx: KNXTestKit):
"""Test `knx.read` service."""
await knx.setup_integration({})
# send read telegram
await hass.services.async_call("knx", "read", {"address": "1/1/1"}, blocking=True)
await knx.assert_read("1/1/1")
# send multiple read telegrams
await hass.services.async_call(
"knx",
"read",
{"address": ["1/1/1", "2/2/2", "3/3/3"]},
blocking=True,
)
await knx.assert_read("1/1/1")
await knx.assert_read("2/2/2")
await knx.assert_read("3/3/3")
async def test_event_register(hass: HomeAssistant, knx: KNXTestKit):
"""Test `knx.event_register` service."""
events = async_capture_events(hass, "knx_event")
test_address = "1/2/3"
await knx.setup_integration({})
# no event registered
await knx.receive_write(test_address, True)
await hass.async_block_till_done()
assert len(events) == 0
# register event with `type`
await hass.services.async_call(
"knx",
"event_register",
{"address": test_address, "type": "2byte_unsigned"},
blocking=True,
)
await knx.receive_write(test_address, (0x04, 0xD2))
await hass.async_block_till_done()
assert len(events) == 1
typed_event = events.pop()
assert typed_event.data["data"] == (0x04, 0xD2)
assert typed_event.data["value"] == 1234
# remove event registration - no event added
await hass.services.async_call(
"knx",
"event_register",
{"address": test_address, "remove": True},
blocking=True,
)
await knx.receive_write(test_address, True)
await hass.async_block_till_done()
assert len(events) == 0
# register event without `type`
await hass.services.async_call(
"knx", "event_register", {"address": test_address}, blocking=True
)
await knx.receive_write(test_address, True)
await knx.receive_write(test_address, False)
await hass.async_block_till_done()
assert len(events) == 2
untyped_event_2 = events.pop()
assert untyped_event_2.data["data"] is False
assert untyped_event_2.data["value"] is None
untyped_event_1 = events.pop()
assert untyped_event_1.data["data"] is True
assert untyped_event_1.data["value"] is None
async def test_exposure_register(hass: HomeAssistant, knx: KNXTestKit):
"""Test `knx.exposure_register` service."""
test_address = "1/2/3"
test_entity = "fake.entity"
test_attribute = "fake_attribute"
await knx.setup_integration({})
# no exposure registered
hass.states.async_set(test_entity, STATE_ON, {})
await knx.assert_no_telegram()
# register exposure
await hass.services.async_call(
"knx",
"exposure_register",
{"address": test_address, "entity_id": test_entity, "type": "binary"},
blocking=True,
)
hass.states.async_set(test_entity, STATE_OFF, {})
await knx.assert_write(test_address, False)
# register exposure
await hass.services.async_call(
"knx",
"exposure_register",
{"address": test_address, "remove": True},
blocking=True,
)
hass.states.async_set(test_entity, STATE_ON, {})
await knx.assert_no_telegram()
# register exposure for attribute with default
await hass.services.async_call(
"knx",
"exposure_register",
{
"address": test_address,
"entity_id": test_entity,
"attribute": test_attribute,
"type": "percentU8",
"default": 0,
},
blocking=True,
)
# no attribute on first change wouldn't work because no attribute change since last test
hass.states.async_set(test_entity, STATE_ON, {test_attribute: 30})
await knx.assert_write(test_address, (30,))
hass.states.async_set(test_entity, STATE_OFF, {})
await knx.assert_write(test_address, (0,))
# don't send same value sequentially
hass.states.async_set(test_entity, STATE_ON, {test_attribute: 25})
hass.states.async_set(test_entity, STATE_ON, {test_attribute: 25})
hass.states.async_set(test_entity, STATE_ON, {test_attribute: 25, "unrelated": 2})
hass.states.async_set(test_entity, STATE_OFF, {test_attribute: 25})
await knx.assert_telegram_count(1)
await knx.assert_write(test_address, (25,))
| [
"noreply@github.com"
] | atmurray.noreply@github.com |
0de155d0c6246dd70a43627dbd6d4515146bed41 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_167/ch56_2019_04_02_11_22_12_410576.py | b71102a26ce7e6987c76cd0a9081fe8dbd16784a | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | def calcula_total_da_nota (n):
l=[]
b=[]
i=0
n=0
while i and n < len(n) and len(i):
total= n*i
i+=1
n+=1
| [
"you@example.com"
] | you@example.com |
679461f083307a15e18fd892f8642f995542c663 | 3f5a1ef51620fd8c35ef38064ca5aa00776ab6f4 | /ds_and_algo_educative/Circular_Linked_List/Delete_node.py | b03ffc0b97506c96ab87f6e8860db2a1d712a1da | [] | no_license | poojagmahajan/python_exercises | 1b290a5c0689f703538caf89bca5bc6c1fdb392a | 65539cf31c5b2ad5768d652ed5fe95054ce5f63f | refs/heads/master | 2022-11-12T03:52:13.533781 | 2020-07-04T20:50:29 | 2020-07-04T20:54:46 | 263,151,942 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,958 | py |
"""steps -
1. if List is not empty
2. if Deleting the head node
set cur to head
iterate till next of cur is head
(means here cur is last node which points to head)
3. if head is only node in list means point to self
then make head none
4. else (list having more nodes ,not just head)
head delete
and update new head to next node of old head
5. else (deleting node other than head)
take two pointers cur and prev
iterate till head
update pointers prev and cur
if key match
then delete node and update cur """
class Node:
def __init__(self, data):
self.data = data
self.next = None
class CircularLinkedList:
def __init__(self):
self.head = None
def prepend(self, data):
new_node = Node(data)
cur = self.head
new_node.next = self.head
if not self.head:
new_node.next = new_node
else:
while cur.next != self.head:
cur = cur.next
cur.next = new_node
self.head = new_node
def append(self, data):
if not self.head:
self.head = Node(data)
self.head.next = self.head
else:
new_node = Node(data)
cur = self.head
while cur.next != self.head:
cur = cur.next
cur.next = new_node
new_node.next = self.head
def print_list(self):
cur = self.head
while cur:
print(cur.data)
cur = cur.next
if cur == self.head:
break
def remove(self, key):
if self.head : # List is not empty
if self.head.data == key : #node to be Delete is head node
cur = self.head
while cur.next != self.head :
cur = cur.next # at end cur will be last node points to head
if self.head == self.head.next : #only one node in list
self.head = None
else: # #head with other nodes preent in list
cur.next = self.head.next #delete head
self.head = self.head.next # make new head
else: # node to be delete is other than head
cur = self.head
prev = None
while cur.next != self.head: # traverse the list till end
prev = cur
cur = cur.next
if cur.data == key: # if node match
prev.next = cur.next #delete node
cur = cur.next
cllist = CircularLinkedList()
cllist.append("A")
cllist.append("B")
cllist.append("C")
cllist.append("D")
cllist.remove("A")
cllist.remove("C")
cllist.print_list() | [
"mahajanpoojag@gmail.com"
] | mahajanpoojag@gmail.com |
13e571b7ef732cb18760a4b8eac56aae11b7b6f6 | af500242dc59de0855873e87a1f7f3ff69f7c9b0 | /discord_bot.py | 08bca3a0117cb0e2921510f9b396bd5bc3e1a628 | [] | no_license | Sispheor/discord_troll_bot | 0400aefe9ca6477139b498c6850cf2d710810a10 | 6621556cd63c20e21865de6f05760f6fb321674d | refs/heads/master | 2022-02-22T09:05:09.423633 | 2021-10-24T19:00:08 | 2021-10-24T19:30:07 | 156,092,074 | 0 | 0 | null | 2021-04-18T10:05:53 | 2018-11-04T14:44:24 | Python | UTF-8 | Python | false | false | 2,669 | py | # authorize bot
# https://discordapp.com/oauth2/authorize?&client_id=<my_id>&scope=bot&permissions=0
# apt install ffmpeg
import os
import signal
import discord as discord
from database_loader import get_database
from models.discord_user import DiscordUser
from models.game_session import GameSession
from my_discord_client import MyClient
import logging
client = None
def init_logger():
logger = logging.getLogger('discord_bot')
handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
def init_database():
print("Init database")
db = get_database("troll_bot")
db.connect()
db.create_tables([DiscordUser, GameSession])
db.close()
print("Init database... done")
def main():
logger = logging.getLogger('discord_bot')
logger.info("Start Discord Troll Bot")
server_id = os.getenv('DISCORD_SERVER_ID', None)
bot_id = os.getenv('DISCORD_BOT_ID', None)
discord_token = os.getenv('DISCORD_TOKEN', None)
mysql_host = os.getenv('MYSQL_HOST', "127.0.0.1")
mysql_user = os.getenv('MYSQL_USER', None)
mysql_password = os.getenv('MYSQL_PASSWORD', None)
mysql_database = os.getenv('MYSQL_DATABASE', None)
if server_id is None:
print("You must provide a 'DISCORD_SERVER_ID'")
exit(1)
if bot_id is None:
print("You must provide a 'DISCORD_BOT_ID'")
exit(1)
if discord_token is None:
print("You must provide a 'DISCORD_TOKEN'")
exit(1)
if mysql_user is None:
print("You must provide a 'MYSQL_USER'")
exit(1)
if mysql_password is None:
print("You must provide a 'MYSQL_PASSWORD'")
exit(1)
if mysql_host is None:
print("You must provide a 'MYSQL_HOST'")
exit(1)
if mysql_database is None:
print("You must provide a 'MYSQL_DATABASE'")
exit(1)
logger.info("DISCORD_SERVER_ID: %s" % server_id)
logger.info("DISCORD_BOT_ID: %s" % bot_id)
intents = discord.Intents.default()
intents.typing = False
intents.members = True
intents.presences = True
client = MyClient(intents=intents)
client.run(discord_token)
def handle_exit():
print("Clean exit")
if client is not None:
client.change_presence(status=discord.Status.offline)
client.logout()
client.close()
print("Disconnected")
if __name__ == "__main__":
signal.signal(signal.SIGINT, handle_exit)
signal.signal(signal.SIGTERM, handle_exit)
init_logger()
init_database()
main()
| [
"nico.marcq@gmail.com"
] | nico.marcq@gmail.com |
f1571ba276d35a04cb87417fbfbd357256c78554 | 4250618abef0d0dcf399f8a2a23e2049c3458ea8 | /website/wiki/editors/__init__.py | 926c100f57e637ce11700d4e06c62066e48e9193 | [
"MIT"
] | permissive | skbly7/serc | 121fd7e88df25213de4d53fce4bd03c2ea448d68 | 4442298ee05c24c3c6bacffdc56a9f6076397cce | refs/heads/master | 2020-12-27T03:18:45.280464 | 2019-05-16T06:10:31 | 2019-05-16T19:13:12 | 53,425,352 | 0 | 2 | MIT | 2019-05-16T19:13:14 | 2016-03-08T16:00:03 | Python | UTF-8 | Python | false | false | 420 | py | from __future__ import absolute_import
from wiki.conf import settings
from django.core.urlresolvers import get_callable
_EditorClass = None
_editor = None
def getEditorClass():
global _EditorClass
if not _EditorClass:
_EditorClass = get_callable(settings.EDITOR)
return _EditorClass
def getEditor():
global _editor
if not _editor:
_editor = getEditorClass()()
return _editor
| [
"skbly7@gmail.com"
] | skbly7@gmail.com |
bffda1a8a18b767fb92c11803909d33101a396ef | 6c8b3ef3b6a3e77ee9a3cc56898217654b043154 | /typings/rdkit/Chem/Suppliers/DbMolSupplier.pyi | 61f763c7a446fccf2fb3da136c8676a0431cb1d8 | [
"MIT"
] | permissive | Andy-Wilkinson/ChemMLToolkit | 8a1eb24ab317c470bc89efa206e38734cb83a7d2 | 83efc7ea66d2def860a3e04ccd70d77fb689fddc | refs/heads/main | 2021-12-26T04:44:05.566942 | 2021-12-13T21:59:57 | 2021-12-13T21:59:57 | 171,165,863 | 2 | 2 | MIT | 2021-12-13T17:18:30 | 2019-02-17T20:00:01 | Python | UTF-8 | Python | false | false | 1,565 | pyi | """
This type stub file was generated by pyright.
"""
from rdkit.Chem.Suppliers.MolSupplier import MolSupplier
"""
Supplies a class for working with molecules from databases
"""
def warning(msg, dest=...): # -> None:
...
class DbMolSupplier(MolSupplier):
"""
new molecules come back with all additional fields from the
database set in a "_fieldsFromDb" data member
"""
def __init__(self, dbResults, molColumnFormats=..., nameCol=..., transformFunc=..., **kwargs) -> None:
"""
DbResults should be a subclass of Dbase.DbResultSet.DbResultBase
"""
...
def GetColumnNames(self): # -> list[Unknown] | tuple[Unknown, ...]:
...
class ForwardDbMolSupplier(DbMolSupplier):
""" DbMol supplier supporting only forward iteration
new molecules come back with all additional fields from the
database set in a "_fieldsFromDb" data member
"""
def __init__(self, dbResults, **kwargs) -> None:
"""
DbResults should be an iterator for Dbase.DbResultSet.DbResultBase
"""
...
def Reset(self): # -> None:
...
def NextMol(self): # -> None:
"""
NOTE: this has side effects
"""
...
class RandomAccessDbMolSupplier(DbMolSupplier):
def __init__(self, dbResults, **kwargs) -> None:
"""
DbResults should be a Dbase.DbResultSet.RandomAccessDbResultSet
"""
...
def __len__(self): # -> int:
...
def __getitem__(self, idx): # -> None:
...
def Reset(self): # -> None:
...
def NextMol(self): # -> None:
...
| [
"Andy-Wilkinson@users.noreply.github.com"
] | Andy-Wilkinson@users.noreply.github.com |
1ec663443d423b72112a60041ac5443ef0a9f4b3 | 321b4ed83b6874eeb512027eaa0b17b0daf3c289 | /94/94.binary-tree-inorder-traversal.234795664.Accepted.leetcode.py | 26e7f20ae3496c22874206d6011746e3b09545bf | [] | no_license | huangyingw/submissions | 7a610613bdb03f1223cdec5f6ccc4391149ca618 | bfac1238ecef8b03e54842b852f6fec111abedfa | refs/heads/master | 2023-07-25T09:56:46.814504 | 2023-07-16T07:38:36 | 2023-07-16T07:38:36 | 143,352,065 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 368 | py | class Solution(object):
def inorderTraversal(self, root):
result = []
stack = []
while stack or root:
if root:
stack.append(root)
root = root.left
else:
root = stack.pop()
result.append(root.val)
root = root.right
return result
| [
"huangyingw@gmail.com"
] | huangyingw@gmail.com |
8fe8d8bbb0912b1b6723ae96719c9d83cd3a7cb7 | 60fa442ae76b960ab21b10fb527c0eac85cdc587 | /python/GetBfacsfromPdb2Pdb.py | 25a0cab9d1c0a5eb66ba89cf74ff05ede32f6a54 | [] | no_license | pjanowski/Pawel_PhD_Scripts | 8e6c2b92b492f9cacf425327a01faaceb27bb87d | 5f9b1735ca6da8fdf0946d6748f3da7d3d723d5e | refs/heads/master | 2021-01-10T06:15:30.287053 | 2015-11-16T04:04:07 | 2015-11-16T04:04:07 | 46,250,317 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,178 | py | #! /usr/bin/python
import sys
import os
from numpy import *
#####input the following variables
crystalfile=sys.argv[1]
targetfile=sys.argv[2]
#############################
#This is to get the bfactors from the original cif or pdb file and put them
#the bfactor column of another file. If atoms are in the same order in the
#two files, you can just use cut, but if not use this script. It identifies
#atoms by atomname and residue number.
f=open(crystalfile,'r')
p= [l for l in f.readlines() if l.strip()]
f.close()
#read in amber pdb (eliminate H and EW atoms,
f=open(targetfile,'r')
a = [l for l in f.readlines() if l.strip()]
f.close()
f=open('newfile.pdb','w')
for line in a:
if line[0:6] != 'ATOM ':
f.write(line)
else:
check=0
resnum=int(line[22:26])
atomname=line[12:16].strip()
for line2 in p:
if line2[0:6] != 'ATOM ':
continue
if (int(line2[22:26]))==resnum and line2[12:16].strip()==atomname:
bfactor=float(line2[60:66])
f.write(line[0:60]+'%6.2f' %bfactor +line[66:])
check=+1
if check>1:
print "oh boy something is wrong"
print line
if check==0:
f.write(line)
print line
f.close()
| [
"pawelrc@gmail.com"
] | pawelrc@gmail.com |
e466432360117169eeda09a88c691d3f1ac4ecda | 2c74bb301f1ed83b79254944183ac5a18a639fdf | /tests/components/file/test_sensor.py | 725ccb527f8127426461473a8f07eae6fda3b9de | [
"Apache-2.0"
] | permissive | Adminiuga/home-assistant | 5bec93007ddac1a268cc359bf7e48530c5f73b38 | dcf68d768e4f628d038f1fdd6e40bad713fbc222 | refs/heads/dev | 2023-02-22T22:03:31.013931 | 2022-11-09T00:27:20 | 2022-11-09T00:27:20 | 123,929,062 | 5 | 4 | Apache-2.0 | 2023-02-22T06:14:31 | 2018-03-05T14:11:09 | Python | UTF-8 | Python | false | false | 3,066 | py | """The tests for local file sensor platform."""
from unittest.mock import Mock, patch
import pytest
from homeassistant.const import STATE_UNKNOWN
from homeassistant.core import HomeAssistant
from homeassistant.setup import async_setup_component
from tests.common import get_fixture_path, mock_registry
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@patch("os.path.isfile", Mock(return_value=True))
@patch("os.access", Mock(return_value=True))
async def test_file_value(hass: HomeAssistant) -> None:
"""Test the File sensor."""
config = {
"sensor": {
"platform": "file",
"name": "file1",
"file_path": get_fixture_path("file_value.txt", "file"),
}
}
with patch.object(hass.config, "is_allowed_path", return_value=True):
assert await async_setup_component(hass, "sensor", config)
await hass.async_block_till_done()
state = hass.states.get("sensor.file1")
assert state.state == "21"
@patch("os.path.isfile", Mock(return_value=True))
@patch("os.access", Mock(return_value=True))
async def test_file_value_template(hass: HomeAssistant) -> None:
"""Test the File sensor with JSON entries."""
config = {
"sensor": {
"platform": "file",
"name": "file2",
"file_path": get_fixture_path("file_value_template.txt", "file"),
"value_template": "{{ value_json.temperature }}",
}
}
with patch.object(hass.config, "is_allowed_path", return_value=True):
assert await async_setup_component(hass, "sensor", config)
await hass.async_block_till_done()
state = hass.states.get("sensor.file2")
assert state.state == "26"
@patch("os.path.isfile", Mock(return_value=True))
@patch("os.access", Mock(return_value=True))
async def test_file_empty(hass: HomeAssistant) -> None:
"""Test the File sensor with an empty file."""
config = {
"sensor": {
"platform": "file",
"name": "file3",
"file_path": get_fixture_path("file_empty.txt", "file"),
}
}
with patch.object(hass.config, "is_allowed_path", return_value=True):
assert await async_setup_component(hass, "sensor", config)
await hass.async_block_till_done()
state = hass.states.get("sensor.file3")
assert state.state == STATE_UNKNOWN
@patch("os.path.isfile", Mock(return_value=True))
@patch("os.access", Mock(return_value=True))
async def test_file_path_invalid(hass: HomeAssistant) -> None:
"""Test the File sensor with invalid path."""
config = {
"sensor": {
"platform": "file",
"name": "file4",
"file_path": get_fixture_path("file_value.txt", "file"),
}
}
with patch.object(hass.config, "is_allowed_path", return_value=False):
assert await async_setup_component(hass, "sensor", config)
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids("sensor")) == 0
| [
"noreply@github.com"
] | Adminiuga.noreply@github.com |
cff39237331fe9e1d9a2bd00d5eb9295e1a2f178 | 3c936cecac73c0de0ce8bca959ef9b49abf96b5e | /slowfast/utils/benchmark.py | 94b94b4f8038517f82d504da2378cc5937ed7c48 | [
"Apache-2.0"
] | permissive | AlexanderMelde/SlowFast | 8cce07d399d3b0d2fe08bf471b5f69e147e9c9e3 | b26b3ec3f3b4cd34c3d626b0fa06818bc69327f4 | refs/heads/master | 2022-11-19T00:50:49.484136 | 2020-04-29T23:28:08 | 2020-04-29T23:30:51 | 257,588,764 | 0 | 0 | Apache-2.0 | 2020-04-30T20:51:40 | 2020-04-21T12:29:11 | Python | UTF-8 | Python | false | false | 3,198 | py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Functions for benchmarks.
"""
import numpy as np
import pprint
import torch
import tqdm
from fvcore.common.timer import Timer
import slowfast.utils.logging as logging
import slowfast.utils.misc as misc
from slowfast.datasets import loader
from slowfast.utils.env import setup_environment
logger = logging.get_logger(__name__)
def benchmark_data_loading(cfg):
"""
Benchmark the speed of data loading in PySlowFast.
Args:
cfg (CfgNode): configs. Details can be found in
slowfast/config/defaults.py
"""
# Set up environment.
setup_environment()
# Set random seed from configs.
np.random.seed(cfg.RNG_SEED)
torch.manual_seed(cfg.RNG_SEED)
# Setup logging format.
logging.setup_logging(cfg.OUTPUT_DIR)
# Print config.
logger.info("Benchmark data loading with config:")
logger.info(pprint.pformat(cfg))
timer = Timer()
dataloader = loader.construct_loader(cfg, "train")
logger.info(
"Initialize loader using {:.2f} seconds.".format(timer.seconds())
)
# Total batch size across different machines.
batch_size = cfg.TRAIN.BATCH_SIZE * cfg.NUM_SHARDS
log_period = cfg.BENCHMARK.LOG_PERIOD
epoch_times = []
# Test for a few epochs.
for cur_epoch in range(cfg.BENCHMARK.NUM_EPOCHS):
timer = Timer()
timer_epoch = Timer()
iter_times = []
for cur_iter, _ in enumerate(tqdm.tqdm(dataloader)):
if cur_iter > 0 and cur_iter % log_period == 0:
iter_times.append(timer.seconds())
ram_usage, ram_total = misc.cpu_mem_usage()
logger.info(
"Epoch {}: {} iters ({} videos) in {:.2f} seconds. "
"RAM Usage: {:.2f}/{:.2f} GB.".format(
cur_epoch,
log_period,
log_period * batch_size,
iter_times[-1],
ram_usage,
ram_total,
)
)
timer.reset()
epoch_times.append(timer_epoch.seconds())
ram_usage, ram_total = misc.cpu_mem_usage()
logger.info(
"Epoch {}: in total {} iters ({} videos) in {:.2f} seconds. "
"RAM Usage: {:.2f}/{:.2f} GB.".format(
cur_epoch,
len(dataloader),
len(dataloader) * batch_size,
epoch_times[-1],
ram_usage,
ram_total,
)
)
logger.info(
"Epoch {}: on average every {} iters ({} videos) take {:.2f}/{:.2f} "
"(avg/std) seconds.".format(
cur_epoch,
log_period,
log_period * batch_size,
np.mean(iter_times),
np.std(iter_times),
)
)
logger.info(
"On average every epoch ({} videos) takes {:.2f}/{:.2f} "
"(avg/std) seconds.".format(
len(dataloader) * batch_size,
np.mean(epoch_times),
np.std(epoch_times),
)
)
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
e6070a073f733d9b54b40d15cfacc66e992b4c2b | 20acb8c4bd5f29e6ecc9006f6228b787b6f71c73 | /app/travel_borders_api/asgi.py | eaa6393c06d5e5e1eeabb5d4ed12b51b0b4c0234 | [] | no_license | datainvestor/TravelBordersApi | 3e6dd8c331c08603f16790aa52a4eb131754423e | 75cd5936f7c121ab8f90430f455095337eb5c141 | refs/heads/master | 2023-05-09T05:24:40.843955 | 2021-06-03T19:11:33 | 2021-06-03T19:11:33 | 371,000,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | """
ASGI config for travel_borders_api project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'travel_borders_api.settings')
application = get_asgi_application()
| [
"you@example.com"
] | you@example.com |
14fa51b5951a609f30d6f5382c6dc0df6719efa5 | 2bc8f66fd34ba1b93de82c67954a10f8b300b07e | /general_backbone/models/layers/space_to_depth.py | 48e73a718a9b86f6a483b8172b52ba33c6fde35b | [] | no_license | DoDucNhan/general_backbone | 7dabffed5a74e622ba23bf275358ca2d09faddc1 | 686c92ab811221d594816207d86a0b97c9b4bc73 | refs/heads/main | 2023-08-31T14:59:23.873555 | 2021-10-23T06:34:14 | 2021-10-23T06:34:14 | 420,419,141 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,805 | py | # Copyright (c) general_backbone. All rights reserved.
import torch
import torch.nn as nn
class SpaceToDepth(nn.Module):
def __init__(self, block_size=4):
super().__init__()
assert block_size == 4
self.bs = block_size
def forward(self, x):
N, C, H, W = x.size()
x = x.view(N, C, H // self.bs, self.bs, W // self.bs, self.bs) # (N, C, H//bs, bs, W//bs, bs)
x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # (N, bs, bs, C, H//bs, W//bs)
x = x.view(N, C * (self.bs ** 2), H // self.bs, W // self.bs) # (N, C*bs^2, H//bs, W//bs)
return x
@torch.jit.script
class SpaceToDepthJit(object):
def __call__(self, x: torch.Tensor):
# assuming hard-coded that block_size==4 for acceleration
N, C, H, W = x.size()
x = x.view(N, C, H // 4, 4, W // 4, 4) # (N, C, H//bs, bs, W//bs, bs)
x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # (N, bs, bs, C, H//bs, W//bs)
x = x.view(N, C * 16, H // 4, W // 4) # (N, C*bs^2, H//bs, W//bs)
return x
class SpaceToDepthModule(nn.Module):
def __init__(self, no_jit=False):
super().__init__()
if not no_jit:
self.op = SpaceToDepthJit()
else:
self.op = SpaceToDepth()
def forward(self, x):
return self.op(x)
class DepthToSpace(nn.Module):
def __init__(self, block_size):
super().__init__()
self.bs = block_size
def forward(self, x):
N, C, H, W = x.size()
x = x.view(N, self.bs, self.bs, C // (self.bs ** 2), H, W) # (N, bs, bs, C//bs^2, H, W)
x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # (N, C//bs^2, H, bs, W, bs)
x = x.view(N, C // (self.bs ** 2), H * self.bs, W * self.bs) # (N, C//bs^2, H * bs, W * bs)
return x
| [
"phamdinhkhanh.tkt53.neu@gmail.com"
] | phamdinhkhanh.tkt53.neu@gmail.com |
690d1fb3811c94ca85e0333746c7a3bd66f82987 | 3b9b4049a8e7d38b49e07bb752780b2f1d792851 | /src/chrome/browser/ui/views/DEPS | 4637b49c44569e29c5e05fa5798cb3fe21e9680c | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | webosce/chromium53 | f8e745e91363586aee9620c609aacf15b3261540 | 9171447efcf0bb393d41d1dc877c7c13c46d8e38 | refs/heads/webosce | 2020-03-26T23:08:14.416858 | 2018-08-23T08:35:17 | 2018-09-20T14:25:18 | 145,513,343 | 0 | 2 | Apache-2.0 | 2019-08-21T22:44:55 | 2018-08-21T05:52:31 | null | UTF-8 | Python | false | false | 196 | include_rules = [
"+chrome/browser/ui/views",
"+components/constrained_window",
"+components/mus/public/cpp",
"+components/user_manager",
"+content/app/resources/grit/content_resources.h",
]
| [
"changhyeok.bae@lge.com"
] | changhyeok.bae@lge.com | |
eb3bae2cd6c429b345069cfc2bfb1afabf8a250c | cfb1073b578e94315bb824e1ee659950fd99b91f | /web/budgets/urls.py | 4463366aef415bd538988d6f33ba3e6bcf64f8fb | [] | no_license | madelinepet/budget_tool | 1cf8e910a5def4a13d4b491214fefd4d02e2409f | 8d682907f98959b88c191a06abba92e4f1c3fd46 | refs/heads/master | 2021-06-13T18:53:39.701832 | 2019-08-02T17:17:12 | 2019-08-02T17:17:12 | 151,135,979 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 549 | py | from django.urls import path
from .views import (
BudgetListView,
TransactionDetailView,
BudgetCreateView,
TransactionCreateView,
)
urlpatterns = [
path('budget', BudgetListView.as_view(), name='budget_view'),
path('budget/new', BudgetCreateView.as_view(), name='budget_create'),
path(
'transaction/<int:id>',
TransactionDetailView.as_view(),
name='transaction_detail'
),
path(
'transaction/new',
TransactionCreateView.as_view(),
name="transaction_create"
)
]
| [
"madelinepet@hotmail.com"
] | madelinepet@hotmail.com |
a3227dc045f8e76677e6a561e74f403e6a16d8b4 | 05780fe9a74b116832611a35fce38fa24b4d4ffc | /madgraph/madgraph_binaries/models/OLD_loopModels_backup/smQCDNLOmass/lorentz.py | 3ea7fb70b89a58b93d5c0d64a9d5496a9aee5e20 | [] | no_license | cesarotti/Dark-Photons | d810658190297528470abe757c4a678075ef48f6 | c6dce1df70c660555bf039a78765e4efbffb4877 | refs/heads/master | 2021-01-22T19:26:13.892225 | 2015-01-28T05:43:20 | 2015-01-28T05:49:54 | 20,692,647 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,518 | py | # This file was automatically created by FeynRules $Revision: 535 $
# Mathematica version: 7.0 for Mac OS X x86 (64-bit) (November 11, 2008)
# Date: Fri 18 Mar 2011 18:40:51
from object_library import all_lorentz, Lorentz
from function_library import complexconjugate, re, im, csc, sec, acsc, asec
R2_GG_1 = Lorentz(name = 'R2_GG_1',
spins = [ 3, 3 ],
structure = 'P(-1,1)*P(-1,1)*Metric(1,2)')
R2_GG_2 = Lorentz(name = 'R2_GG_2',
spins = [ 3, 3 ],
structure = 'P(1,1)*P(2,1)')
R2_GG_3 = Lorentz(name = 'R2_GG_3',
spins = [ 3, 3 ],
structure = 'Metric(1,2)')
R2_QQ_1 = Lorentz(name = 'R2_QQ_1',
spins = [ 2, 2 ],
structure = 'P(-1,1)*Gamma(-1,2,1)')
R2_QQ_2 = Lorentz(name = 'R2_QQ_2',
spins = [ 2, 2 ],
structure = 'Identity(1,2)')
FFV1 = Lorentz(name = 'FFV1',
spins = [ 2, 2, 3 ],
structure = 'Gamma(3,2,1)')
VVV1 = Lorentz(name = 'VVV1',
spins = [ 3, 3, 3 ],
structure = 'P(3,1)*Metric(1,2) - P(3,2)*Metric(1,2) - P(2,1)*Metric(1,3) + P(2,3)*Metric(1,3) + P(1,2)*Metric(2,3) - P(1,3)*Metric(2,3)')
VVVV1 = Lorentz(name = 'VVVV1',
spins = [ 3, 3, 3, 3 ],
structure = 'Metric(1,4)*Metric(2,3) - Metric(1,3)*Metric(2,4)')
VVVV3 = Lorentz(name = 'VVVV3',
spins = [ 3, 3, 3, 3 ],
structure = 'Metric(1,4)*Metric(2,3) - Metric(1,2)*Metric(3,4)')
VVVV4 = Lorentz(name = 'VVVV4',
spins = [ 3, 3, 3, 3 ],
structure = 'Metric(1,3)*Metric(2,4) - Metric(1,2)*Metric(3,4)')
GHGHG = Lorentz(name = 'GHGHG',
spins = [ 1, 1, 3 ],
structure = 'P(3,1)')
#=============================================================================================
# 4-gluon R2 vertex
#=============================================================================================
R2_4G_1234 = Lorentz(name = 'R2_4G_1234',
spins = [ 3, 3, 3, 3 ],
structure = 'Metric(1,2)*Metric(3,4)')
R2_4G_1324 = Lorentz(name = 'R2_4G_1324',
spins = [ 3, 3, 3, 3 ],
structure = 'Metric(1,3)*Metric(2,4)')
R2_4G_1423 = Lorentz(name = 'R2_4G_1423',
spins = [ 3, 3, 3, 3 ],
structure = 'Metric(1,4)*Metric(2,3)')
#=============================================================================================
| [
"eyvind.niklasson@gmail.com"
] | eyvind.niklasson@gmail.com |
8ef864cb9d779223b9c72360d7e995c16611736f | 4a8bfa3407aa98a04ede3162f85467b1b5012fe7 | /aiogram/api/types/animation.py | bf9a29a321fea81e8096895f80a8b30531f09602 | [] | no_license | aiogram/tg-codegen | 07ec80814eec46f464d2490fd27b7b6b27257f1b | ba3c2f893591d45dda418dd16e0646e260afdf14 | refs/heads/master | 2022-12-09T10:44:10.781570 | 2021-11-07T23:33:25 | 2021-11-07T23:33:25 | 218,523,371 | 24 | 5 | null | 2022-12-08T08:47:43 | 2019-10-30T12:33:21 | Python | UTF-8 | Python | false | false | 1,276 | py | from __future__ import annotations
from typing import TYPE_CHECKING, Optional
from .base import TelegramObject
if TYPE_CHECKING:
from .photo_size import PhotoSize
# === Generated region: Animation ===
class Animation(TelegramObject):
"""
This object represents an animation file (GIF or H.264/MPEG-4 AVC video without sound).
Source: https://core.telegram.org/bots/api#animation
"""
file_id: str
"""Identifier for this file, which can be used to download or reuse the file"""
file_unique_id: str
"""Unique identifier for this file, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file."""
width: int
"""Video width as defined by sender"""
height: int
"""Video height as defined by sender"""
duration: int
"""Duration of the video in seconds as defined by sender"""
thumb: Optional[PhotoSize] = None
"""*Optional*. Animation thumbnail as defined by sender"""
file_name: Optional[str] = None
"""*Optional*. Original animation filename as defined by sender"""
mime_type: Optional[str] = None
"""*Optional*. MIME type of the file as defined by sender"""
file_size: Optional[int] = None
"""*Optional*. File size in bytes"""
| [
"jroot.junior@gmail.com"
] | jroot.junior@gmail.com |
338b83160b9e57f4812825f9e2c52a813242d952 | bdbc9cd8c64cfa92efffb9e138cb282d36f69b0a | /addons/website_mail/__openerp__.py | a160ac0bab52188a3a20c2e1a6e1298140191e9f | [] | no_license | clebaresu/impra-adns | d330cece1b710643625627bfd7ed66bac7d233ef | 8b9889d86c6ea194cfb7b0db8bdc3284635cc081 | refs/heads/master | 2020-05-02T16:51:41.798969 | 2019-03-27T22:03:32 | 2019-03-27T22:03:32 | 178,080,681 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,666 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Website Mail',
'category': 'Hidden',
'summary': 'Website Module for Mail',
'version': '0.1',
'description': """Glue module holding mail improvements for website.""",
'author': 'OpenERP SA',
'depends': ['website', 'mail', 'email_template'],
'data': [
'views/snippets.xml',
'views/website_mail.xml',
'views/website_email_designer.xml',
'views/email_template_view.xml',
'data/mail_groups.xml',
'security/website_mail.xml',
],
'qweb': [
'static/src/xml/website_mail.xml'
],
'installable': True,
'auto_install': True,
}
| [
"clebaresu@gmail.com"
] | clebaresu@gmail.com |
1aed9706512090afb363b8b4ed3d72448e09f2ae | f11ecb59dab63af605c6e5f256ee59e00447ecc1 | /658-find-k-closest-elements.py | bdc8d9afda5c7990ddfaa5b37493cfc0919ea122 | [] | no_license | floydchenchen/leetcode | 626d55f72ec914764385ce82b0f3c57f5a7e9de8 | 9d9e0c08992ef7dbd9ac517821faa9de17f49b0e | refs/heads/master | 2022-10-07T20:33:55.728141 | 2020-06-08T16:09:17 | 2020-06-08T16:09:17 | 269,525,880 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,655 | py | # 658. Find K Closest Elements
# Given a sorted array, two integers k and x, find the k closest elements to x in the array.
# The result should also be sorted in ascending order. If there is a tie, the smaller
# elements are always preferred.
#
# Example 1:
# Input: [1,2,3,4,5], k=4, x=3
# Output: [1,2,3,4]
# Example 2:
# Input: [1,2,3,4,5], k=4, x=-1
# Output: [1,2,3,4]
# 1. Use python's custom sorting to sort the nums by each num's distance to x,
# if there is a tie we put smaller num before. For example,
# if we have [1,2,3,4,5], k=4, x=3, then the sorted array becomes [3,2,4,1,5].
# ==> sorted(nums, key=lambda num: (abs(num - x), num))
# 2. We return the first k elements in the sorted array in an ascending order.
# For example, the sorted array is [3,2,4,1,5], so we need to return [1,2,3,4].
# ==> sorted(sorted(nums, key=lambda num: (abs(num - x), num))[:k])
class Solution:
# O(nlgn) solution
def findClosestElements(self, nums, k, x):
# return sorted(sorted(nums, key=lambda num: (abs(num - x), num))[:k])
nums.sort(key=lambda num: (abs(num - x), num))
return sorted(nums[:k])
# O(lgn) solution: binary search
def findClosestElements1(self, nums, k, x):
left, right = 0, len(nums) - k - 1
while left <= right:
mid = (left + right) // 2
# 如果nums[mid]比nums[mid+k]离x更远
if x - nums[mid] > nums[mid + k] - x:
left = mid + 1
else:
right = mid - 1
return nums[left:left + k]
# print(Solution().findClosestElements1([1,2,3,4,5],4,3))
print(Solution().findClosestElements1([1], 1, 1))
| [
"chen2918@umn.edu"
] | chen2918@umn.edu |
985a206f9c8a4ee9e27bbe5543558e87b38d7bbe | 0feb9799532328d2eb5c9673751bf44a06652375 | /logic/falsifiability.py | c1472cd4f55454103672fe465c572608f1fd38dc | [] | no_license | krishnakatyal/philosophy | ebc78947508f12a9d06356d2cc8d38f6afb0510a | f7735e9adc9ba609894d89384562dbda2f794548 | refs/heads/master | 2022-03-28T14:00:52.460599 | 2020-01-25T00:28:55 | 2020-01-25T00:28:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,120 | py |
"""
We say that a theory is falsified only if we have accepted basic statements which contradict
it (cf. section 11, rule 2). This condition is necessary, but not sufficient; for we have seen
that non-reproducible single occurrences are of no significance to science.
Thus a few stray basic statements contradicting a theory will hardly induce us to reject it as falsified.
We shall take it as falsified only if we discover a reproducible effect which refutes the theory. In other words,
we only accept the falsification if a low-level empirical hypothesis which describes such an effect is
proposed and corroborated. This kind of hypothesis may be called a falsifying hypothesis.
The requirement that the falsifying hypothesis must be empirical, and so falsifiable, only means that it must
stand in a certain logical relationship to possible basic statements; thus this requirement only concerns the
logical form of the hypothesis. The rider that the hypothesis should be corroborated refers to tests which it
ought to have passed—tests which confront it with accepted basic statements.
The following is an example of inconsistent (logically false) statement - that is - one in which
p · ~p can be deduced. This is not an example of a falsifiable statement.
1. p -> (p v q) # From Bertrand Russell's "primitive propositions"
2. ~p -> (p -> q) # From substitiuting ̄pp for p and then p -> q for ~p v q
3. ~p · p -> q # By importation
Consider a class α of a finite number of occurrences, for example the class of throws made yesterday with this
particular die. This class α, which is assumed to be non-empty, serves, as it were, as a frame of reference, and
will be called a (finite) reference-class. The number of elements belonging to α, i.e. its cardinal number, is
denoted by ‘N(α)’, to be read ‘the number of α’. Now let there be another class, β, which may be finite or not.
We call β our property-class: it may be, for example, the class of all throws which show a five, or (as we shall say)
which have the property five.
The class of those elements which belong to both α and β, for example the class of throws made yesterday with this
particular die and having the property five, is called the product-class of α and β, and is denoted by ‘α.β’, to be
read ‘α and β’. Since α.β is a subclass of α, it can at most contain a finite number of elements (it may be empty).
The number of elements in α.β is denoted by ‘N(α.β)’.
Whilst we symbolize (finite) numbers of elements by N, the relative frequencies are symbolized by F′′. For example,
‘the relative frequency of the property β within the finite reference-class α’ is written ‘αF′′(β)’, which may be read
‘the α-frequency of β’. We can now define the relative frequency.
"""
def relfreq(n, alpha, beta):
"""
Relative frequency: For some function n that returns the number of fives thrown yesterday with this die when
given alpha and beta, and, when given only alpha, it returns the total number of throws yesterday.
"""
return n(alpha, beta) / n(alpha)
| [
"shussainather@gmail.com"
] | shussainather@gmail.com |
549f5cdd2f1ceb5c3304e98da3e3fb0df1f12544 | 8f4f9a07fa25490289b76253971b2ae8c386e8cd | /huaweicloud-sdk-kafka/setup.py | 3f33f92192faaa07344e21f9595dfb8db145ea7d | [
"Apache-2.0"
] | permissive | xingkongcwb/huaweicloud-sdk-python-v3 | 5c635af84a9fb4fb37c45a8de38f691724ca5e31 | 007d5c54ff71f0a2d7b52dcc53e3d38dec4fe775 | refs/heads/master | 2023-03-23T09:56:10.606710 | 2021-03-19T12:47:29 | 2021-03-19T12:47:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,421 | py | # coding: utf-8
from os import path
from setuptools import setup, find_packages
NAME = "huaweicloudsdkkafka"
VERSION = "3.0.37-rc"
AUTHOR = "HuaweiCloud SDK"
AUTHOR_EMAIL = "hwcloudsdk@huawei.com"
URL = "https://github.com/huaweicloud/huaweicloud-sdk-python-v3"
DESCRIPTION = "Kafka"
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README_PYPI.md'), encoding='utf-8') as f:
LONG_DESCRIPTION = f.read()
REQUIRES = ["huaweicloudsdkcore"]
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license="Apache LICENSE 2.0",
url=URL,
keywords=["huaweicloud", "sdk", "Kafka"],
packages=find_packages(exclude=["tests*"]),
install_requires=REQUIRES,
include_package_data=True,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Software Development'
]
)
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
4a5516634363232525aa64ac4cf02350279fd5aa | e20ff12b280bcae1ee6436a0a2285a10fce7baf0 | /Proposal_Extraction_Code/py-faster-2/tools/demo.py | 7d08a57f24a2fb7f4b7618a1e657f7c46e2e9a66 | [] | no_license | SeokHeel/face_classification_ccbr2016 | e5037c86d9ed33bf375101b0ce1eab97c45b4199 | 05f5664d41ebffb89389902423479db2a64e2501 | refs/heads/master | 2021-06-08T20:55:50.079297 | 2016-10-16T10:28:11 | 2016-10-16T10:28:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,651 | py | #!/usr/bin/env python
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""
Demo script showing detections in sample images.
See README.md for installation instructions before running.
"""
import _init_paths
from fast_rcnn.config import cfg
from fast_rcnn.test import im_detect
from fast_rcnn.nms_wrapper import nms
from utils.timer import Timer
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import caffe, os, sys, cv2
import argparse
CLASSES = ('__background__',
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
NETS = {'vgg16': ('VGG16',
'VGG16_faster_rcnn_final.caffemodel'),
'zf': ('ZF',
'ZF_faster_rcnn_final.caffemodel')}
def vis_proposal(im,dets):
im=im[:,:,(2,1,0)]
fig,ax=plt.subplots(figsize=(12,12))
ax.imshow(im,aspect='equal')
for i in xrange(0,len(dets)):
proposal=dets[i,:]
ax.add_patch(
plt.Rectangle((proposal[0],proposal[1]),
proposal[2]-proposal[0],
proposal[3]-proposal[1],
fill=False,edgecolor='green',linewidth=3.5)
)
plt.axis('off')
plt.tight_layout()
plt.draw()
def vis_detections(im, class_name, dets, thresh=0.5):
"""Draw detected bounding boxes."""
inds = np.where(dets[:, -1] >= thresh)[0]
if len(inds) == 0:
return
# transfer BGR to RGB
im = im[:, :, (2, 1, 0)]
fig, ax = plt.subplots(figsize=(12, 12))
ax.imshow(im, aspect='equal')
for i in inds:
bbox = dets[i, :4]
score = dets[i, -1]
ax.add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='red', linewidth=3.5)
)
ax.text(bbox[0], bbox[1] - 2,
'{:s} {:.3f}'.format(class_name, score),
bbox=dict(facecolor='blue', alpha=0.5),
fontsize=14, color='white')
ax.set_title(('{} detections with '
'p({} | box) >= {:.1f}').format(class_name, class_name,
thresh),
fontsize=14)
plt.axis('off')
plt.tight_layout()
plt.draw()
def demo(net, image_name):
"""Detect object classes in an image using pre-computed object proposals."""
# Load the demo image
im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)
im = cv2.imread(im_file)
# Detect all object classes and regress object bounds
timer = Timer()
timer.tic()
scores, boxes = im_detect(net, im)
#debug for proposal
# vis_proposal(im,proposal)
timer.toc()
print ('Detection took {:.3f}s for '
'{:d} object proposals').format(timer.total_time, boxes.shape[0])
# Visualize detections for each class
CONF_THRESH = 0.8
NMS_THRESH = 0.3
for cls_ind, cls in enumerate(CLASSES[1:]):
cls_ind += 1 # because we skipped background
cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
cls_scores = scores[:, cls_ind]
dets = np.hstack((cls_boxes,
cls_scores[:, np.newaxis])).astype(np.float32)
keep = nms(dets, NMS_THRESH)
dets = dets[keep, :]
vis_detections(im, cls, dets, thresh=CONF_THRESH)
def parse_args():
"""Parse input arguments."""
parser = argparse.ArgumentParser(description='Faster R-CNN demo')
parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--cpu', dest='cpu_mode',
help='Use CPU mode (overrides --gpu)',
action='store_true')
parser.add_argument('--net', dest='demo_net', help='Network to use [vgg16]',
choices=NETS.keys(), default='vgg16')
args = parser.parse_args()
return args
if __name__ == '__main__':
cfg.TEST.HAS_RPN = True # Use RPN for proposals
args = parse_args()
prototxt = os.path.join(cfg.MODELS_DIR, NETS[args.demo_net][0],
'faster_rcnn_alt_opt', 'faster_rcnn_test.pt')
caffemodel = os.path.join(cfg.DATA_DIR, 'faster_rcnn_models',
NETS[args.demo_net][1])
if not os.path.isfile(caffemodel):
raise IOError(('{:s} not found.\nDid you run ./data/script/'
'fetch_faster_rcnn_models.sh?').format(caffemodel))
if args.cpu_mode:
caffe.set_mode_cpu()
else:
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
cfg.GPU_ID = args.gpu_id
net = caffe.Net(prototxt, caffemodel, caffe.TEST)
print '\n\nLoaded network {:s}'.format(caffemodel)
# Warmup on a dummy image
im = 128 * np.ones((300, 500, 3), dtype=np.uint8)
for i in xrange(2):
_, _= im_detect(net, im)
# im_names = ['000456.jpg', '000542.jpg', '001150.jpg',
# '001763.jpg', '004545.jpg']
im_names=['000456.jpg']
for im_name in im_names:
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Demo for data/demo/{}'.format(im_name)
demo(net, im_name)
plt.show()
| [
"davidsonic@163.com"
] | davidsonic@163.com |
ec3ad99b79a2d058b98fd32d979c31f5dc59e36b | 4af281a1b2992de4dceb37ef91f635c44e4f7dcd | /keymaster/server/model/entry_tag.py | f0dafd5d3a14eec05412335222adc013d1a49fe0 | [
"Apache-2.0"
] | permissive | shiroyuki/keymaster | e9772b50c4966ef2ee00860c934a161af60007e3 | 1efee54427378394ab04d0e53247eb38c28bc97c | refs/heads/master | 2020-12-23T11:29:19.262198 | 2020-02-09T06:20:50 | 2020-02-09T06:20:50 | 237,137,133 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 507 | py | from dataclasses import dataclass
from uuid import uuid4
from xmode.db.analyzer import default, constraint, identified_by, stored_in
from xmode.db.definitions import UUID, String
@stored_in('entry_tags')
@identified_by('id') # This is a PK.
@constraint('index', ('owner_id',))
@constraint('unique', ('owner_id', 'entry_id',))
@constraint('unique', ('owner_id', 'name',))
@default('id', lambda: str(uuid4()))
@dataclass
class EntryTag:
id: UUID
owner_id: UUID
entry_id: UUID
name: String
| [
"jnopporn@shiroyuki.com"
] | jnopporn@shiroyuki.com |
d5c63e22901bf27c426b46d99840236846c3fc62 | ccc86a5029ff00b478685fe8ae365db141096927 | /shop/migrations/0001_initial.py | 6b30a5fe0d0b92676ee132213ecea53afdab2dbe | [] | no_license | jaishivnani/MyAwesomeCart | d0359744d80aa5c29fb77b91eb93434dbf72fd20 | 7ea29cb190b281a3ec2a5385e783a6660f0f905e | refs/heads/main | 2023-01-04T20:45:22.229027 | 2020-10-24T18:27:16 | 2020-10-24T18:27:16 | 305,470,988 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | # Generated by Django 3.1 on 2020-08-27 16:12
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('product_name', models.CharField(max_length=50)),
('desc', models.CharField(max_length=300)),
('pub_date', models.DateField()),
],
),
]
| [
"jaishivnani30@gmail.com"
] | jaishivnani30@gmail.com |
e15b690026e60540430f1e6f9ef59773e11ad73b | d67bd00f8fe819bd3011ce154c19cbc765d59f1d | /branches/3.2_buildout/il/sapl/skins/consultas/protocolo/protocolo_index_html.py | e345773faec152ee9eb9e8df142a93e8c00973f7 | [] | no_license | openlegis-br/sagl | 90f87bdbbaa8a6efe0ccb5691ea8424575288c46 | eabf7529eefe13a53ed088250d179a92218af1ed | refs/heads/master | 2023-08-31T12:29:39.382474 | 2023-08-29T16:12:01 | 2023-08-29T16:12:01 | 32,593,838 | 17 | 1 | null | 2023-08-29T06:16:55 | 2015-03-20T16:11:04 | Python | UTF-8 | Python | false | false | 371 | py | from Products.CMFCore.utils import getToolByName
request=context.REQUEST
mt = getToolByName(context, 'portal_membership')
if mt.isAnonymousUser():
redirect_url=context.portal_url()+'/consultas/protocolo/pesquisa_publica_form'
else:
redirect_url=context.portal_url()+'/consultas/protocolo/protocolo_pesquisar_form'
request.RESPONSE.redirect(redirect_url)
| [
"contato@openlegis.com.br"
] | contato@openlegis.com.br |
31e8ef8e14aef22b0ad325db319efbf0418237b6 | f5b36f10e6c9c1dbe70208c291c7af5f3e0d39c9 | /client_src/client.py | c15f8f6b879e881dc1deceddbc90b73c589a7112 | [] | no_license | gandolfreddy/esp8266_project | 3a1eabd70f41dcf76b4c8e62cde1a926794841ab | 08625955fba09e14d6d3b18e391824fcf3f2456a | refs/heads/main | 2023-05-29T18:10:32.509840 | 2021-06-17T19:18:00 | 2021-06-17T19:18:00 | 377,606,747 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 934 | py | import socket
from machine import Pin
from time import sleep_ms
def start():
led = Pin(2, Pin.OUT, value=1)
sw_led = Pin(0, Pin.IN, Pin.PULL_UP)
sw_bye = Pin(12, Pin.IN, Pin.PULL_UP)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.connect(("192.168.4.1", 13326))
while True:
print("Press any button")
while sw_led.value() and sw_bye.value():
pass
if not sw_led.value():
msg = "led change"
while not sw_led.value():
pass
if not sw_bye.value():
msg = "bye"
while not sw_bye.value():
pass
s.send(msg.encode("utf-8"))
reply = s.recv(128)
if reply == b'quit':
print("Disconnect")
s.close()
break
print(str(reply))
| [
"noreply@github.com"
] | gandolfreddy.noreply@github.com |
57580b731fc5be2d1f3b6c50e77441d76337e23f | 3cef23043a4bf3bc2a37d952e51b1a9faeb76d0b | /tests/widgets/test_mdselect.py | f103357802890708ffc5494965f13e07c3cde412 | [
"MIT"
] | permissive | hiroaki-yamamoto/django-nghelp | 794bc103ecf5bb652363e3a1df530afa971ac46a | e15dc408a4a9205d23f9d68b6d10d7b9648dbd2e | refs/heads/master | 2020-07-29T21:41:23.972244 | 2018-01-15T04:30:49 | 2018-01-15T04:30:49 | 73,657,569 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,463 | py | #!/usr/bin/env python
# coding=utf-8
"""MDSelect Tests."""
from django import setup
from django.test import TestCase
from django_nghelp.widgets import MDSelect
setup()
class SimpleMDSelectTest(TestCase):
    """Basic rendering tests for MDSelect with a flat choice tuple."""

    def setUp(self):
        """Create the widget under test."""
        choices = (("test", "Test"), ("test2", "Test2"), (None, "Test3"))
        self.field = MDSelect(choices=choices)

    def test_render(self):
        """Rendering without a value marks the None choice as selected."""
        rendered = str(self.field.render("result", None)).replace("\n", "")
        expected = "".join((
            '<md-select data-name="result">',
            '<md-option data-value="test">Test</md-option>',
            '<md-option data-value="test2">Test2</md-option>',
            '<md-option data-selected>Test3</md-option>',
            '</md-select>',
        ))
        self.assertEqual(rendered, expected)

    def test_render_has_value(self):
        """Rendering with a matching value marks that option as selected."""
        rendered = str(self.field.render("result", "test")).replace("\n", "")
        expected = "".join((
            '<md-select data-name="result">',
            '<md-option data-value="test" data-selected>Test</md-option>',
            '<md-option data-value="test2">Test2</md-option>',
            '<md-option>Test3</md-option>',
            '</md-select>',
        ))
        self.assertEqual(rendered, expected)

    def test_render_unselectable_value(self):
        """A value matching no choice leaves every option unselected."""
        rendered = str(self.field.render("result", "test_a")).replace("\n", "")
        expected = "".join((
            '<md-select data-name="result">',
            '<md-option data-value="test">Test</md-option>',
            '<md-option data-value="test2">Test2</md-option>',
            '<md-option>Test3</md-option>',
            '</md-select>',
        ))
        self.assertEqual(rendered, expected)
class MDSelectGroupingTest(TestCase):
    """Rendering tests for MDSelect with grouped (nested) choices."""

    def setUp(self):
        """Create a widget whose first choice is an option group."""
        grouped = (("testTest1", "Test Test 1"), ("testTest2", "Test Test 2"))
        self.field = MDSelect(choices=(("test", grouped), ("test2", "Test2")))

    def test_render(self):
        """Nested choices render inside an md-optgroup wrapper."""
        rendered = str(self.field.render("result", None)).replace("\n", "")
        expected = "".join((
            '<md-select data-name="result">',
            '<md-optgroup data-label="test">',
            '<md-option data-value="testTest1">Test Test 1</md-option>',
            '<md-option data-value="testTest2">Test Test 2</md-option>',
            '</md-optgroup>',
            '<md-option data-value="test2">Test2</md-option>',
            '</md-select>',
        ))
        self.assertEqual(rendered, expected)
class MDSelectEmptyFieldTest(TestCase):
    """Rendering test for an MDSelect created without any choices."""

    def setUp(self):
        """Create a choice-less widget."""
        self.field = MDSelect()

    def test_render(self):
        """An empty widget renders an empty md-select element."""
        rendered = str(self.field.render("result", None)).replace("\n", "")
        self.assertEqual(rendered, '<md-select data-name="result"></md-select>')
class MDSelectDisableSelectTest(TestCase):
    """With disable_select=True no option is ever marked as selected."""

    def setUp(self):
        """Create the widget with selection disabled."""
        choices = (("test", "Test"), ("test2", "Test2"), (None, "Test3"))
        self.field = MDSelect(disable_select=True, choices=choices)

    def _expected(self):
        # Both tests below expect identical markup: selection is disabled,
        # so the value passed to render() never influences the output.
        return "".join((
            '<md-select data-name="result">',
            '<md-option data-value="test">Test</md-option>',
            '<md-option data-value="test2">Test2</md-option>',
            '<md-option>Test3</md-option>',
            '</md-select>',
        ))

    def test_render(self):
        """Rendering without a value yields plain, unselected options."""
        rendered = str(self.field.render("result", None)).replace("\n", "")
        self.assertEqual(rendered, self._expected())

    def test_render_has_value(self):
        """Even a matching value does not mark any option as selected."""
        rendered = str(self.field.render("result", "test")).replace("\n", "")
        self.assertEqual(rendered, self._expected())
| [
"hiroaki@hysoftware.net"
] | hiroaki@hysoftware.net |
3439cd72da514cd08f996686a5271e11db6ec5df | 2f989d067213e7a1e19904d482a8f9c15590804c | /lib/python3.4/site-packages/faker/utils/decorators.py | 4dc5f2fa92aff4383077799a7f5457c4e9b98750 | [
"MIT"
] | permissive | levabd/smart4-portal | beb1cf8847134fdf169ab01c38eed7e874c66473 | 2c18ba593ce7e9a1e17c3559e6343a14a13ab88c | refs/heads/master | 2023-02-18T05:49:40.612697 | 2022-08-02T09:35:34 | 2022-08-02T09:35:34 | 116,001,098 | 0 | 1 | MIT | 2023-02-15T21:34:01 | 2018-01-02T10:00:07 | Roff | UTF-8 | Python | false | false | 534 | py | # coding=utf-8
from functools import wraps
from faker.utils import text
def slugify(fn):
    """Decorate *fn* so its return value is passed through text.slugify."""
    @wraps(fn)
    def inner(*args, **kwargs):
        return text.slugify(fn(*args, **kwargs))
    return inner
def slugify_domain(fn):
    """Like slugify, but dots are preserved (suitable for domain names)."""
    @wraps(fn)
    def inner(*args, **kwargs):
        return text.slugify(fn(*args, **kwargs), allow_dots=True)
    return inner
def slugify_unicode(fn):
    """Like slugify, but unicode characters are allowed to survive."""
    @wraps(fn)
    def inner(*args, **kwargs):
        return text.slugify(fn(*args, **kwargs), allow_unicode=True)
    return inner
| [
"levabd@gmail.com"
] | levabd@gmail.com |
1b739b8ddc0c3ea06a70f43ebd20060c45f6d936 | 91d1a6968b90d9d461e9a2ece12b465486e3ccc2 | /dlm_write_f/resource_tag.py | ea3068d36bd5cd92a43e321bfe732414ac7ef3c8 | [] | no_license | lxtxl/aws_cli | c31fc994c9a4296d6bac851e680d5adbf7e93481 | aaf35df1b7509abf5601d3f09ff1fece482facda | refs/heads/master | 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 491 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
# Make the parent directory of this script's folder importable so that the
# ``common`` package resolves when the script is run directly.
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_parameter

# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-instances.html
if __name__ == '__main__':
    """
    untag-resource : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/dlm/untag-resource.html
    """
    # NOTE(review): the string above points at untag-resource while the call
    # below registers "tag-resource" -- looks like a copy/paste leftover from
    # a sibling script; confirm which operation is intended.
    write_parameter("dlm", "tag-resource")
"hcseo77@gmail.com"
] | hcseo77@gmail.com |
aebfaa7b26235eadfb4ca58a1c720541daabd68a | 29da2ca6def1270be13a3096685a8e5d82828dff | /CIM14/IEC61970/LoadModel/ConformLoadGroup.py | 8d8d955dd5c605b9e577b17866563deec9ca5a69 | [
"MIT"
] | permissive | rimbendhaou/PyCIM | 75eb3bcd3729b2410c03f3d5c66d6f1e05e21df3 | d578bb0bf1af344342bd23344385ed9c06c2d0ee | refs/heads/master | 2022-04-28T01:16:12.673867 | 2020-04-16T02:19:09 | 2020-04-16T02:19:09 | 256,085,381 | 0 | 0 | MIT | 2020-04-16T02:15:20 | 2020-04-16T02:08:14 | null | UTF-8 | Python | false | false | 3,544 | py | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.IEC61970.LoadModel.LoadGroup import LoadGroup
class ConformLoadGroup(LoadGroup):
    """A group of loads conforming to an allocation pattern.
    """
    def __init__(self, ConformLoadSchedules=None, EnergyConsumers=None, *args, **kw_args):
        """Initialises a new 'ConformLoadGroup' instance.
        @param ConformLoadSchedules: The ConformLoadSchedules in the ConformLoadGroup.
        @param EnergyConsumers: Conform loads assigned to this ConformLoadGroup.
        """
        # Assigning through the public properties (below) keeps the reverse
        # references on the related objects in sync with this group.
        self._ConformLoadSchedules = []
        self.ConformLoadSchedules = [] if ConformLoadSchedules is None else ConformLoadSchedules
        self._EnergyConsumers = []
        self.EnergyConsumers = [] if EnergyConsumers is None else EnergyConsumers
        super(ConformLoadGroup, self).__init__(*args, **kw_args)
    # CIM introspection metadata: plain attributes, their types/defaults/enums,
    # and which attributes are (many-valued) object references.
    _attrs = []
    _attr_types = {}
    _defaults = {}
    _enums = {}
    _refs = ["ConformLoadSchedules", "EnergyConsumers"]
    _many_refs = ["ConformLoadSchedules", "EnergyConsumers"]
    def getConformLoadSchedules(self):
        """The ConformLoadSchedules in the ConformLoadGroup.
        """
        return self._ConformLoadSchedules
    def setConformLoadSchedules(self, value):
        # Detach every currently linked schedule, then attach the new ones;
        # each schedule's back-reference is updated to point at this group.
        for x in self._ConformLoadSchedules:
            x.ConformLoadGroup = None
        for y in value:
            y._ConformLoadGroup = self
        self._ConformLoadSchedules = value
    ConformLoadSchedules = property(getConformLoadSchedules, setConformLoadSchedules)
    def addConformLoadSchedules(self, *ConformLoadSchedules):
        # Linking is done via the schedule's property, which maintains both ends.
        for obj in ConformLoadSchedules:
            obj.ConformLoadGroup = self
    def removeConformLoadSchedules(self, *ConformLoadSchedules):
        for obj in ConformLoadSchedules:
            obj.ConformLoadGroup = None
    def getEnergyConsumers(self):
        """Conform loads assigned to this ConformLoadGroup.
        """
        return self._EnergyConsumers
    def setEnergyConsumers(self, value):
        # Same detach/attach dance as setConformLoadSchedules, but the
        # consumers' back-reference attribute is named LoadGroup.
        for x in self._EnergyConsumers:
            x.LoadGroup = None
        for y in value:
            y._LoadGroup = self
        self._EnergyConsumers = value
    EnergyConsumers = property(getEnergyConsumers, setEnergyConsumers)
    def addEnergyConsumers(self, *EnergyConsumers):
        for obj in EnergyConsumers:
            obj.LoadGroup = self
    def removeEnergyConsumers(self, *EnergyConsumers):
        for obj in EnergyConsumers:
            obj.LoadGroup = None
| [
"rwl@thinker.cable.virginmedia.net"
] | rwl@thinker.cable.virginmedia.net |
3b3b7fb2927e145132ea14c11ad0d36645415be1 | f5377cebd671c743cb44dc76b1ab8ea502c47849 | /scripts/getCommitLink.py | 94ff04a509bd5ff0a3e1ec70bc715f5ac019e677 | [] | no_license | Kechegomz/propMining | 67a38f6b3d62d043c18591381bde15769fd8f72f | fc66861567473e4491f78290f5fcc034bdfc099b | refs/heads/master | 2023-05-04T07:11:32.712962 | 2021-05-28T10:15:56 | 2021-05-28T10:15:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,018 | py | import urllib.request
import urllib.error
from bs4 import BeautifulSoup
import ast
import time
import sys
path = sys.argv[1]
def getHtml(url):
    """Open *url* and return the HTTP response object.

    Retries (recursively) after a 5-second pause when the server answers
    429 Too Many Requests; any other HTTP error is re-raised.
    """
    try:
        # Bug fix: the original read the module-level ``link`` variable
        # instead of the ``url`` parameter, silently ignoring the argument.
        return urllib.request.urlopen(url)
    except urllib.error.HTTPError as e:
        if e.getcode() == 429:
            time.sleep(5)  # back off before retrying the same URL
            return getHtml(url)
        # Non-429 errors previously fell through and returned None, which
        # made the caller fail later with an AttributeError; re-raise instead.
        raise
# One commit hash per input line (the second space-separated token); search
# GitHub for each hash and print the commit URLs found in the results page.
for line in open(path, "r").readlines():
    hsh = line.strip().split(" ")[1]
    link = "https://github.com/search?q={}&type=commits".format(hsh)
    fp = getHtml(link)
    mybytes = fp.read()
    mystr = mybytes.decode("utf8")
    fp.close()
    soup = BeautifulSoup(mystr, features="html.parser")
    # Each result anchor carries a JSON-ish payload in "data-hydro-click";
    # scan its comma-separated fields for the one starting with "url".
    for hyper in soup.find_all("a", {"class": "message markdown-title js-navigation-open"}):
        for attrib in hyper["data-hydro-click"].split(","):
            tokens = attrib.split(":")
            if tokens[0] == "\"url\"":
                # Re-join on ':' because the URL itself contains colons, then
                # strip the JSON quoting/closing brace before printing.
                print(":".join(tokens[1:]).replace("\"","").replace("}",""))
                break
    time.sleep(2)  # be polite between searches to reduce rate limiting
| [
"MY_NAME@example.com"
] | MY_NAME@example.com |
7b9f9b5bfad5c0d7ee7eef3d0d08b5cde41e18c4 | ae7d5d11351af9201ce6181c48b8c60363c7ed00 | /lib/galaxy/model/migrate/versions/0157_rework_dataset_validation.py | f315961a724527975006d0548c3d77274c30a0dd | [
"CC-BY-2.5",
"AFL-2.1",
"AFL-3.0",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | natefoo/galaxy | 818037d03f39ccfb3714c7e784fd64d7ad8f4d2e | 64150c5bd803e75ed032e9f15acd003bae92b5ef | refs/heads/master | 2023-08-17T02:57:02.580487 | 2020-03-26T13:33:01 | 2020-03-26T13:33:01 | 31,212,836 | 2 | 1 | NOASSERTION | 2019-04-25T12:30:28 | 2015-02-23T15:01:46 | Python | UTF-8 | Python | false | false | 2,218 | py | """
Rework dataset validation in database.
"""
from __future__ import print_function
import logging
from sqlalchemy import (
Column,
ForeignKey,
Integer,
MetaData,
Table,
TEXT,
)
from galaxy.model.custom_types import TrimmedString
from galaxy.model.migrate.versions.util import add_column, create_table, drop_column, drop_table
log = logging.getLogger(__name__)
metadata = MetaData()
validation_error_table = Table("validation_error", metadata,
Column("id", Integer, primary_key=True),
Column("dataset_id", Integer, ForeignKey("history_dataset_association.id"), index=True),
Column("message", TrimmedString(255)),
Column("err_type", TrimmedString(64)),
Column("attributes", TEXT))
def upgrade(migrate_engine):
print(__doc__)
metadata.bind = migrate_engine
metadata.reflect()
drop_table(validation_error_table)
history_dataset_association_table = Table("history_dataset_association", metadata, autoload=True)
library_dataset_dataset_association_table = Table("library_dataset_dataset_association", metadata, autoload=True)
for dataset_instance_table in [history_dataset_association_table, library_dataset_dataset_association_table]:
validated_state_column = Column('validated_state', TrimmedString(64), default='unknown', server_default="unknown", nullable=False)
add_column(validated_state_column, dataset_instance_table, metadata)
validated_state_message_column = Column('validated_state_message', TEXT)
add_column(validated_state_message_column, dataset_instance_table, metadata)
def downgrade(migrate_engine):
metadata.bind = migrate_engine
metadata.reflect()
create_table(validation_error_table)
history_dataset_association_table = Table("history_dataset_association", metadata, autoload=True)
library_dataset_dataset_association_table = Table("library_dataset_dataset_association", metadata, autoload=True)
for dataset_instance_table in [history_dataset_association_table, library_dataset_dataset_association_table]:
drop_column('validated_state', dataset_instance_table, metadata)
drop_column('validated_state_message', dataset_instance_table, metadata)
| [
"jmchilton@gmail.com"
] | jmchilton@gmail.com |
4caba39636403bb61752fffbb21005e82c319669 | 78b44dff4ca19aba4815a383f0e715a7ce178703 | /src/sort/leetcode242_ValidAnagram.py | 868c94ea3964fe2a13d4813167b243696b106043 | [] | no_license | apepkuss/Cracking-Leetcode-in-Python | 80e5e9fd407441db77652fc480f523d3636281c1 | cbe6a7e7f05eccb4f9c5fce8651c0d87e5168516 | refs/heads/master | 2021-09-03T00:11:05.434202 | 2018-01-04T07:38:02 | 2018-01-04T07:38:02 | 85,363,605 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,384 | py |
class Solution(object):
"""
@ Amazon, Uber, Yelp
Hash Table
Given two strings s and t, write a function to determine if t is an anagram of s.
For example,
s = "anagram", t = "nagaram", return true.
s = "rat", t = "car", return false.
Note:
You may assume the string contains only lowercase alphabets.
Follow up:
What if the inputs contain unicode characters? How would you adapt your solution to such case?
"""
def isAnagram(self, s, t):
"""
:type s: str
:type t: str
:rtype: bool
"""
m, n = len(s), len(t)
# check if their length are same
if m != n: return False
# use hashtable to compute the number of each character in s and t
s_table = {}
t_table = {}
for i in range(len(s)):
if s[i] not in s_table:
s_table[s[i]] = 1
else:
s_table[s[i]] += 1
if t[i] not in t_table:
t_table[t[i]] = 1
else:
t_table[t[i]] += 1
# check if s and t have same number of characters
for k, v in s_table.items():
if k not in t_table or v != t_table[k]:
return False
return True
if __name__ == "__main__":
s = "a"
t = "a"
res = Solution().isAnagram(s, t)
print res | [
"xin.sam.liu@hotmail.com"
] | xin.sam.liu@hotmail.com |
74d56f3d4e8a30326e6d5c28fe262e9b50446a41 | 64b2026bd0b6f3be8b6e881ec9ddd9ca432e20f6 | /gendiff/formats/__init__.py | b101a79de81280debfef4fe44ae7d4992275692f | [] | no_license | akocur/python-project-lvl2 | e8a973902fbbbbb29a5081f1f76c6d33a13e8996 | 706fe3b9b48679e1cf02763c2459883be4bf028f | refs/heads/main | 2023-07-16T04:16:37.107677 | 2021-09-03T09:44:26 | 2021-09-03T09:44:26 | 396,692,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 760 | py | from gendiff.formats.plain import plain
from gendiff.formats.stylish import stylish
from gendiff.formats.json import formatted_to_json
FORMAT_PLAIN = 'plain'
FORMAT_JSON = 'json'
FORMAT_STYLISH = 'stylish'
def get_formatter(format_name):
"""
Return formatter by format_name.
:param format_name: str
one of the available formatters.
:return: formatting function
"""
return {
FORMAT_STYLISH: stylish,
FORMAT_PLAIN: plain,
FORMAT_JSON: formatted_to_json,
}.get(format_name)
def get_default_format_name():
"""Return default format name."""
return FORMAT_STYLISH
def get_available_formatters():
"""Return available formatters."""
return [FORMAT_STYLISH, FORMAT_PLAIN, FORMAT_JSON]
| [
"akocur@yandex.ru"
] | akocur@yandex.ru |
5ea90cb07677aae40edaea3b4f2b8ca14d93ff57 | 3834a683bc7f3eb66615fad8c95d2f9400ca825a | /Palinlink.py | c89bde0f7828267cfa4484e449cfee06748108c6 | [] | no_license | balajimanikandanm/python3 | 5d0ae2a0fd2e20426ee9ac5dfc8a26eb0117aa29 | ed151ee91c935dc7ecb2c0e54c4e7b107a32c5e7 | refs/heads/master | 2020-05-31T22:37:44.534017 | 2019-08-14T10:24:00 | 2019-08-14T10:24:00 | 190,523,807 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 470 | py | class Stack:
def __init__(self):
self.items = []
def is_empty(self):
return self.items == []
def push(self, data):
self.items.append(data)
def pop(self):
return self.items.pop()
# Palindrome check: push every character of the input onto a stack, then pop
# them all back off to build the reversed string.
sb = Stack()
text = input()
for character in text:
    sb.push(character)
reversed_text = ''
while not sb.is_empty():
    # Popping yields characters in LIFO order, i.e. reversed.
    # (Repeated += on str is O(n^2) overall; fine for short inputs.)
    reversed_text = reversed_text + sb.pop()
if text == reversed_text:
    print("YES")
else:
    print("NO")
"noreply@github.com"
] | balajimanikandanm.noreply@github.com |
692205fd486af2933f22524eb23723379f93900b | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/63/usersdata/160/30989/submittedfiles/swamee.py | 26285a5016005edac6a49bc82c98cdb39ca1a975 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | # -*- coding: utf-8 -*-
import math
#COMECE SEU CÓDIGO AQUI
f=float(input('Digite f:'))
L=float(input('Digite L:'))
Q=float(input('Digite Q:'))
DeltaH=float(input('Digite DeltaH:'))
v=float(input('Digite v:'))
g=9.81
E=0.000002
D=( (8*f*L*(Q*Q))/((math.pi*math.pi)*g*DeltaH) )**(1/5)
Rey=(4*Q)/(math.pi*D*v)
K=(0.25)/(math.log*10(E/(3.7*D)+5.74/Rey**0.9)**2
print('%.4f' %D)
print('%.4f' %Rey)
print('%.4f' %K)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
8d6f84d3ad7afe3a91c3e23423f26b79f70a9767 | e953ae5da775a934b86379cfa3d864bb7376fe36 | /08 tkinter_python/17.py | a8791e8dce3ded8dac4247414aba0ab26a08bdac | [] | no_license | agyenes/greenfox-exercises | 1481f17d1ddd78099d17022aa1800955ae39d92b | a2c7912c61708c6ebc53c9a22f8c09550432d4c3 | refs/heads/master | 2020-04-11T00:42:17.842170 | 2016-10-19T06:10:22 | 2016-10-19T06:10:22 | 68,081,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 521 | py | from tkinter import *
import math
root = Tk()
canvas = Canvas(root, width='500', height='500', bg='white')
canvas.pack()
size = 20
# String-art pattern: 23 sweeps of three chords (green/blue/red), one family
# per side of a triangle with corners near (20, 420), (480, 420), (250, 20).
# The sqrt(3) factor keeps the endpoints on the 60-degree slanted sides.
for i in range(23):
    canvas.create_line(20 + i * size, 420, 250 + i * size/2, 20 + i * math.sqrt(3) * 10, fill='green')
    canvas.create_line(250 - i * size/2, 20 + i * math.sqrt(3) * 10, 480 - i * size, 420, fill='blue')
    canvas.create_line(480 - i * size/2, 420 - i * math.sqrt(3) * 10, 20 + i * size/2, 420 - i * math.sqrt(3) * 10, fill='red')
    # NOTE(review): pack() inside the loop is redundant -- the canvas was
    # already packed above; repeated calls are no-ops here.
    canvas.pack()
root.mainloop()
| [
"aron.gyenes@gmail.com"
] | aron.gyenes@gmail.com |
da534bbca9d72e0ec477c30ac4656daf831758e7 | f135ace9411167259588fc5f52dd2f300d4b1551 | /C++/trans.py | 5b3a7ae492a330370ece256ba40069c60ebe7d2e | [] | no_license | celestialized/FPGA_feedforward-neural-network_for_qubit_discrimination | c0cd74704bd6a63d3be2fb22db371c995ebe836f | 1ad7fd0b37a45fe249b28b8b38f25be152b56d45 | refs/heads/master | 2021-09-20T15:56:24.582204 | 2018-08-11T11:26:32 | 2018-08-11T11:26:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 645 | py | import numpy as np
import random
import cPickle as pickle
import matplotlib.pyplot as plt
import argparse
import math
import gzip
# NOTE: ``cPickle`` exists only on Python 2, so this script targets Python 2;
# matplotlib/argparse/random/math are imported but unused here.
f =gzip.open('./DetectionBinsData_pickle615_clean.gzip','rb') #49664*100 times measurement
fb=open('bright.txt','w')
fd=open('dark.txt','w')
num_bins=100
# Each iteration unpickles two consecutive objects from the stream (first the
# "dark" block, then the "bright" block) and slices out the histogram bins --
# presumably 2-D numpy arrays given the [:, a:b] indexing; TODO confirm.
for k in range(10000):
    print(k)
    d_data=pickle.load(f)[:,1:num_bins+1]
    b_data=pickle.load(f)[:,102:102+num_bins]
    #print(d_data,b_data)
    for i in range(100):
        for j in range(num_bins):
            # Values are written back-to-back with no separator; one
            # measurement (num_bins values) per output line.
            fd.write(str(d_data[i][j]))
            fb.write(str(b_data[i][j]))
        fd.write('\n')
        fb.write('\n')
fd.close()
fb.close()
# NOTE(review): the gzip handle ``f`` is never closed -- harmless for a
# read-only stream at interpreter exit, but worth tidying.
"1402434478@qq.com"
] | 1402434478@qq.com |
a4d8a3b9ba56807058e9ba8b2d56bcaf4d272a41 | ae5bb043439d2bad96a7017a57c6b83dd66c6ffb | /setup.py | a6cf117b5fddab5ce77c4a21b22fc5d0511e5d7f | [
"MIT"
] | permissive | kyleabeauchamp/xopen | e27be7a2de5ca665d5ea394aa9b8aff5bf4649a3 | c12a94e9f9af49da37edb20d6e82fb0bd5f4a08a | refs/heads/master | 2020-12-25T22:48:38.628011 | 2016-09-09T15:56:46 | 2016-09-09T15:56:46 | 68,322,235 | 0 | 0 | null | 2016-09-15T18:53:50 | 2016-09-15T18:53:49 | null | UTF-8 | Python | false | false | 784 | py | import sys
from setuptools import setup
if sys.version_info < (2, 6):
sys.stdout.write("At least Python 2.6 is required.\n")
sys.exit(1)
setup(
name = 'xopen',
version = '0.1.0',
author = 'Marcel Martin',
author_email = 'mail@marcelm.net',
url = 'https://github.com/marcelm/xopen/',
description = 'Open compressed files transparently',
license = 'MIT',
py_modules = ['xopen'],
classifiers = [
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Programming Language :: Cython",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
]
)
| [
"marcel.martin@scilifelab.se"
] | marcel.martin@scilifelab.se |
29286514242944ff7946769d77ea552685d38ff7 | b4cc610bbd069c2b3e1f50c82303d48de21843a4 | /ce/c008_test.py | 694bc09965957756b549d1d77cfaaf1fa9801360 | [] | no_license | AakashKumarNain/pythonesque | d47b890ff42fa7baa3f25f9569d8a7310c7aa710 | 3225aaf878c52962becafd60a50243a91f92b264 | refs/heads/master | 2020-03-18T00:07:00.624695 | 2018-05-19T09:24:16 | 2018-05-19T09:24:16 | 134,078,646 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | """
CodeEval Reverse Words
author: Manny egalli64@gmail.com
info: http://thisthread.blogspot.com/2017/02/codeeval-reverse-words.html
https://www.codeeval.com/open_challenges/8/
"""
import unittest
from ce.c008 import solution
class TestCodeEval(unittest.TestCase):
    """Unit tests for the CodeEval Reverse Words solution."""

    def test_provided_1(self):
        """The provided sample: word order is reversed."""
        reversed_words = solution('Hello World')
        self.assertEqual('World Hello', reversed_words)
if __name__ == '__main__':
    # Allow running this module directly: discovers and runs TestCodeEval.
    unittest.main()
| [
"egalli64@gmail.com"
] | egalli64@gmail.com |
8a8b5e3641d7d2725ccfaf4069df1791c26f75fa | 6f4d104a5d87fa6e7a113139224158dae791eb18 | /models/vanilla_vae_bak.py | 46e8a7e7488e87f3856357259913ecc62d38be42 | [] | no_license | yellowbeango/VAE_NSF | b7c30996764d7d7f12499111e8e9db93c6d201c0 | 3bfc068b3363ffe53ceddc6f1adb0fa25afd4d31 | refs/heads/master | 2023-03-11T14:38:33.377391 | 2021-03-04T04:07:36 | 2021-03-04T04:07:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,668 | py | import torch
from .base import BaseVAE
from torch import nn
from torch.nn import functional as F
from abc import abstractmethod
from typing import List, Callable, Union, Any, TypeVar, Tuple
# from torch import tensor as Tensor
Tensor = TypeVar('torch.tensor')
__all__ = ['VanillaVAE']
class VanillaVAE(BaseVAE):
    # Standard convolutional VAE: conv encoder -> (mu, log_var) -> sampled z
    # -> transposed-conv decoder back to a 1-channel image in [-1, 1] (Tanh).
    def __init__(self,
                 in_channels: int,
                 latent_dim: int,
                 hidden_dims: List = None,
                 **kwargs) -> None:
        super(VanillaVAE, self).__init__()
        self.latent_dim = latent_dim
        modules = []
        if hidden_dims is None:
            hidden_dims = [32, 64, 128, 256, 512]
        # Build Encoder
        # Each stage halves the spatial resolution (stride-2 conv).
        for h_dim in hidden_dims:
            modules.append(
                nn.Sequential(
                    nn.Conv2d(in_channels, out_channels=h_dim,
                              kernel_size=3, stride=2, padding=1),
                    nn.BatchNorm2d(h_dim),
                    nn.LeakyReLU())
            )
            in_channels = h_dim
        self.encoder = nn.Sequential(*modules)
        # hidden_dims[-1] * 4 assumes the encoder output is 2x2 spatial
        # (e.g. 64x64 input through five stride-2 convs) -- TODO confirm.
        self.fc_mu = nn.Linear(hidden_dims[-1] * 4, latent_dim)
        self.fc_var = nn.Linear(hidden_dims[-1] * 4, latent_dim)
        # Build Decoder
        modules = []
        self.decoder_input = nn.Linear(latent_dim, hidden_dims[-1] * 4)
        # In-place reversal: from here on hidden_dims runs widest-first.
        hidden_dims.reverse()
        for i in range(len(hidden_dims) - 1):
            modules.append(
                nn.Sequential(
                    nn.ConvTranspose2d(hidden_dims[i],
                                       hidden_dims[i + 1],
                                       kernel_size=3,
                                       stride=2,
                                       padding=1,
                                       output_padding=1),
                    nn.BatchNorm2d(hidden_dims[i + 1]),
                    nn.LeakyReLU())
            )
        self.decoder = nn.Sequential(*modules)
        # Final upsample plus a conv to a single output channel, squashed
        # to [-1, 1] by Tanh.
        self.final_layer = nn.Sequential(
            nn.ConvTranspose2d(hidden_dims[-1],
                               hidden_dims[-1],
                               kernel_size=3,
                               stride=2,
                               padding=1,
                               output_padding=1),
            nn.BatchNorm2d(hidden_dims[-1]),
            nn.LeakyReLU(),
            nn.Conv2d(hidden_dims[-1], out_channels=1,
                      kernel_size=3, padding=1),
            nn.Tanh())
    def encode(self, input: Tensor) -> List[Tensor]:
        """
        Encodes the input by passing through the encoder network
        and returns the latent codes.
        :param input: (Tensor) Input tensor to encoder [N x C x H x W]
        :return: (Tensor) List of latent codes
        """
        result = self.encoder(input)
        result = torch.flatten(result, start_dim=1)
        # Split the result into mu and var components
        # of the latent Gaussian distribution
        mu = self.fc_mu(result)
        log_var = self.fc_var(result)
        return [mu, log_var]
    def decode(self, z: Tensor) -> Tensor:
        """
        Maps the given latent codes
        onto the image space.
        :param z: (Tensor) [B x D]
        :return: (Tensor) [B x C x H x W]
        """
        result = self.decoder_input(z)
        # NOTE(review): 512 and the 2x2 spatial size are hard-coded here,
        # which breaks if a custom hidden_dims list is passed to __init__ --
        # confirm before reusing with other configurations.
        result = result.view(-1, 512, 2, 2)
        result = self.decoder(result)
        result = self.final_layer(result)
        return result
    def reparameterize(self, mu: Tensor, logvar: Tensor) -> Tensor:
        """
        Reparameterization trick to sample from N(mu, var) from
        N(0,1).
        :param mu: (Tensor) Mean of the latent Gaussian [B x D]
        :param logvar: (Tensor) Standard deviation of the latent Gaussian [B x D]
        :return: (Tensor) [B x D]
        """
        # z = mu + sigma * eps with eps ~ N(0, I); keeps sampling
        # differentiable w.r.t. mu and logvar.
        std = torch.exp(0.5 * logvar)
        eps = torch.randn_like(std)
        return eps * std + mu
    def forward(self, input: Tensor, **kwargs) -> List[Tensor]:
        # Returns [reconstruction, original input, mu, log_var] -- the order
        # loss_function() expects in *args.
        mu, log_var = self.encode(input)
        z = self.reparameterize(mu, log_var)
        return [self.decode(z), input, mu, log_var]
    def loss_function(self,
                      *args,
                      **kwargs) -> dict:
        """
        Computes the VAE loss function.
        KL(N(\mu, \sigma), N(0, 1)) = \log \frac{1}{\sigma} + \frac{\sigma^2 + \mu^2}{2} - \frac{1}{2}
        :param args:
        :param kwargs:
        :return:
        """
        recons = args[0]
        input = args[1]
        mu = args[2]
        log_var = args[3]
        kld_weight = kwargs['M_N']  # Account for the minibatch samples from the dataset
        recons_loss = F.mse_loss(recons, input)
        # Closed-form KL divergence between N(mu, sigma) and N(0, 1),
        # summed over latent dims and averaged over the batch.
        kld_loss = torch.mean(-0.5 * torch.sum(1 + log_var - mu ** 2 - log_var.exp(), dim=1), dim=0)
        loss = recons_loss + kld_weight * kld_loss
        return {'loss': loss, 'Reconstruction_Loss': recons_loss, 'KLD': -kld_loss}
    def sample(self, z, **kwargs) -> Tensor:
        """
        Samples from the latent space and return the corresponding
        image space map.
        :param num_samples: (Int) Number of samples
        :param current_device: (Int) Device to run the model
        :return: (Tensor)
        """
        # NOTE(review): the docstring describes num_samples/current_device,
        # but the signature now takes a pre-sampled latent ``z`` (the old
        # sampling code is commented out below) -- docstring is stale.
        # z = torch.randn(num_samples,
        #                 self.latent_dim)
        #
        # z = z.to(current_device)
        samples = self.decode(z)
        return samples
    def generate(self, x: Tensor, **kwargs) -> Tensor:
        """
        Given an input image x, returns the reconstructed image
        :param x: (Tensor) [B x C x H x W]
        :return: (Tensor) [B x C x H x W]
        """
        return self.forward(x)[0]
| [
"xuma@my.unt.edu"
] | xuma@my.unt.edu |
79b286b760c71c260b3537fdac6de52e607aa4c1 | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part004022.py | 30655fd8714685bb1fb65a8edca5cb26a2b8e4e4 | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,296 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher62483(CommutativeMatcher):
    # Auto-generated matchpy many-to-one matcher (Rubi integration rules);
    # structure and naming come from the code generator -- do not hand-edit.
    _instance = None
    # Pattern 0: two sequence variables over commutative Mul, the second with
    # a default of S(1) (optional multiplicand).
    patterns = {
        0: (0, Multiset({}), [
            (VariableWithCount('i2.2.2.1.0', 1, 1, None), Mul),
            (VariableWithCount('i2.2.2.1.0_1', 1, 1, S(1)), Mul)
        ])
    }
    subjects = {}
    subjects_by_id = {}
    bipartite = BipartiteGraph()
    associative = Mul
    max_optional_count = 1
    anonymous_patterns = set()
    def __init__(self):
        self.add_subject(None)
    @staticmethod
    def get():
        # Classic lazy singleton: build the matcher on first access only.
        if CommutativeMatcher62483._instance is None:
            CommutativeMatcher62483._instance = CommutativeMatcher62483()
        return CommutativeMatcher62483._instance
    @staticmethod
    def get_match_iter(subject):
        subjects = deque([subject]) if subject is not None else deque()
        subst0 = Substitution()
        # State 62482
        # ``return`` before ``yield`` makes this a generator that yields
        # nothing -- the generated state machine has no accepting states here.
        return
        yield
from collections import deque | [
"franz.bonazzi@gmail.com"
] | franz.bonazzi@gmail.com |
0de5dd2b9f24eb71dd2be78f458863fd9dcb879c | 2293c76c3d18e2fcd44ded90bd40113d26285663 | /pyeccodes/defs/grib2/tables/10/4_2_2_4_table.py | 7eb24c3ccf8358f9e6b0619628378ca739ad3f6f | [
"Apache-2.0"
] | permissive | ecmwf/pyeccodes | b1f121dbddf68d176a03805ed5144ba0b37ac211 | dce2c72d3adcc0cb801731366be53327ce13a00b | refs/heads/master | 2022-04-23T10:37:40.524078 | 2020-04-18T06:30:29 | 2020-04-18T06:30:29 | 255,554,540 | 9 | 3 | null | null | null | null | UTF-8 | Python | false | false | 477 | py | def load(h):
return ({'abbr': 0, 'code': 0, 'title': 'Fire outlook', 'units': 'Code table 4.224'},
{'abbr': 1,
'code': 1,
'title': 'Fire outlook due to dry thunderstorm',
'units': 'Code table 4.224'},
{'abbr': 2, 'code': 2, 'title': 'Haines Index', 'units': 'Numeric'},
{'abbr': 3, 'code': 3, 'title': 'Fire burned area', 'units': '%'},
{'abbr': None, 'code': 255, 'title': 'Missing'})
| [
"baudouin.raoult@ecmwf.int"
] | baudouin.raoult@ecmwf.int |
3ba33009a6ff59d33e9edeafa1a50d110a7cee0b | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_120/763.py | f7e5aa872796b1b2cae4a6e30617c689add81644 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 497 | py | f = open("A-small-attempt0.in", "r")
o = open("Bullseye-A-small-attempt0-out.txt", "w")
T = int(f.readline())
for t in range(T):
count = 0
randt = f.readline().split()
r = int(randt[0]) #white circle radius
paint = int(randt[1]) #mL of paint
rIn = r
rOut = r + 1
nextArea = rOut**2 - rIn**2
while paint >= nextArea:
count += 1
paint -= nextArea
rOut += 2
rIn += 2
nextArea = rOut**2 - rIn**2
o.write("Case #" + str(t+1) + ": " + str(count) + "\n")
f.close()
o.close()
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
e8d80ddc0ce9ff77bb3c1ba7f309bd2c74e9afc6 | a79ed9e33fe76af58b34082e4fe762716e38e80d | /Stage3/method2_feature_vector.py | 48cfb9871fb375683d208d9912c8fc9122d123c0 | [] | no_license | xwang322/Data-Science | c7a5a617cbb97787905c79cbed9cefd4362cd77f | 6ae6fb912cf8b1b743ae4a4e3fffb99b2f496376 | refs/heads/master | 2021-01-12T09:53:15.596743 | 2016-12-19T22:39:28 | 2016-12-19T22:39:28 | 76,287,261 | 0 | 0 | null | 2016-12-18T00:12:52 | 2016-12-12T19:13:53 | Python | UTF-8 | Python | false | false | 6,236 | py | import re
import random
import json
from random import choice, sample, randint
from py_stringmatching import simfunctions, tokenizers
import string_process2
from sklearn.preprocessing import Imputer
import numpy as np
feature_attr = ['id','product type','product name','product segment','brand','category']
#with open('X.txt','r') as f:
with open('5000Test.txt','r') as f:
for i,l in enumerate(f):
pass
f.close()
#print(i)
matrix = [['null' for j in range(6)] for j in range (2*(i+1))]
result = ['null' for j in range(i+1)]
#with open('X.txt','r') as f:
l = 0
with open('5000Test.txt','r') as f:
lines = f.readlines()
r = 0
m = 0
for i in lines:
dict_tmp1 = {}
dict_tmp2 = {}
items = i.split('?')
json_id1 = items[1]
json_id2 = items[3]
id1 = re.findall(r'[\d|]+', json_id1)
id2 = re.findall(r'[\d|]+', json_id2)
json_data1 = json.loads(items[2])
for each in json_data1.keys():
aname = each
bname = json_data1.get(aname)
cname = ''.join(bname)
if aname.lower() in feature_attr:
attrPost = feature_attr.index(aname.lower())
dict_tmp1.setdefault(aname.lower(), cname.lower())
for each in feature_attr:
if each not in dict_tmp1.keys():
dict_tmp1.setdefault(each, '')
matrix[r][0] = id1
matrix[r][1] = dict_tmp1.get('product type')
matrix[r][2] = dict_tmp1.get('product name')
matrix[r][3] = dict_tmp1.get('product segment')
matrix[r][4] = dict_tmp1.get('brand')
matrix[r][5] = dict_tmp1.get('category')
# for product 2
json_data2 = json.loads(items[4])
for each in json_data2.keys():
aname = each
bname = json_data2.get(aname)
cname = ''.join(bname)
if aname.lower() in feature_attr:
attrPost = feature_attr.index(aname.lower())
dict_tmp2.setdefault(aname.lower(), cname.lower())
for each in feature_attr:
if each not in dict_tmp2.keys():
dict_tmp2.setdefault(each, '')
matrix[r+1][0] = id2
matrix[r+1][1] = dict_tmp2.get('product type')
matrix[r+1][2] = dict_tmp2.get('product name')
matrix[r+1][3] = dict_tmp2.get('product segment')
matrix[r+1][4] = dict_tmp2.get('brand')
matrix[r+1][5] = dict_tmp2.get('category')
result[m] = items[5]
r += 2
m += 1
#print(len(matrix))
#print(result)
f.close()
#x = open('X_matrix.txt','w')
x = open('5000Test_matrix.txt','w')
for each in matrix:
print(each, file = x)
x.close()
#x = open('X_matrix_class.txt','w')
x = open('5000Test_matrix_class.txt','w')
for each in result:
print(each, file = x)
x.close()
i = int(len(matrix)/2-1)
#print(i)
FVmatrix = [[0 for j in range(11)] for j in range(i+1)]
r = 0
for r in range(i+1):
# product type: needleman_wunsch distance
if(matrix[2*r][1] == '' and matrix[2*r+1][1] == ''):
FVmatrix[r][0] = 999
else:
FVmatrix[r][0] = simfunctions.needleman_wunsch(matrix[2*r][1], matrix[2*r+1][1])
# product type: soft tfidf distance
if(matrix[2*r][1] == '' and matrix[2*r+1][1] == ''):
FVmatrix[r][1] = 999
else:
product_type1 = string_process2.string_process2(matrix[2*r][1])
product_type2 = string_process2.string_process2(matrix[2*r+1][1])
FVmatrix[r][1] = simfunctions.soft_tfidf(set(product_type1), set(product_type2))
product_name1 = string_process2.string_process2(matrix[2*r][2])
product_name2 = string_process2.string_process2(matrix[2*r+1][2])
#print(product_name1, product_name2)
# product name: soft TF/IDF
FVmatrix[r][2] = simfunctions.soft_tfidf(set(product_name1), set(product_name2))
#product name: jaccard score
FVmatrix[r][3] = simfunctions.jaccard(set(product_name1), set(product_name2))
#product segment: needleman_wunsch distance
if(matrix[2*r][3] == '' and matrix[2*r+1][3] == ''):
FVmatrix[r][4] = 999
else:
FVmatrix[r][4] = simfunctions.needleman_wunsch(matrix[2*r][3], matrix[2*r+1][3])
#product segment: soft tfidf distance
if(matrix[2*r][3] == '' and matrix[2*r+1][3] == ''):
FVmatrix[r][5] = 999
else:
product_seg1 = string_process2.string_process2(matrix[2*r][3])
product_seg2 = string_process2.string_process2(matrix[2*r+1][3])
FVmatrix[r][5] = simfunctions.soft_tfidf(set(product_seg1), set(product_seg2))
#brand: needleman_wunsch distance
if(matrix[2*r][4] == '' and matrix[2*r+1][4] == ''):
FVmatrix[r][6] = 999
else:
FVmatrix[r][6] = simfunctions.needleman_wunsch(matrix[2*r][4], matrix[2*r+1][4])
#brand: soft tfidf distance
if(matrix[2*r][4] == '' and matrix[2*r+1][4] == ''):
FVmatrix[r][7] = 999
else:
product_brand1 = string_process2.string_process2(matrix[2*r][4])
product_brand2 = string_process2.string_process2(matrix[2*r+1][4])
FVmatrix[r][7] = simfunctions.soft_tfidf(set(product_brand1), set(product_brand2))
# category: needleman_wunsch distance
if(matrix[2*r][5] == '' and matrix[2*r+1][5] == ''):
FVmatrix[r][8] = 999
else:
FVmatrix[r][8] = simfunctions.needleman_wunsch(matrix[2*r][5], matrix[2*r+1][5])
# category: soft tfidf distance
if(matrix[2*r][5] == '' and matrix[2*r+1][5] == ''):
FVmatrix[r][9] = 999
else:
product_category1 = string_process2.string_process2(matrix[2*r][5])
product_category2 = string_process2.string_process2(matrix[2*r+1][5])
FVmatrix[r][9] = simfunctions.soft_tfidf(set(product_category1), set(product_category2))
if(result[r] == 'MATCH\n'):
FVmatrix[r][10] = 1
else:
FVmatrix[r][10] = 0
#print(FVmatrix)
#x = open('X_feature_vector.txt','w')
x = open('5000Test2_string_processed_feature_vector.txt','w')
for each in FVmatrix:
print(each, file = x)
x.close()
| [
"noreply@github.com"
] | xwang322.noreply@github.com |
1cb23cdd8301f2345e6cfe91689a1091d605faf8 | 727cdc7c9af6fdf6b4eb8444197718e5c6760019 | /review_qa_collect/translate.py | 14afdd26ccfc51a61f3881d1b1c7c8b9b073d693 | [] | no_license | newer027/amazon_crawler | 0cc6feb30f9180ae48ac936eeb6af41ec06eadfd | 39d6867a8dd56b90dae5e98aa44e6df274439f8e | refs/heads/master | 2022-11-23T17:04:33.995126 | 2020-04-03T15:42:42 | 2020-04-03T15:42:42 | 252,774,253 | 1 | 0 | null | 2022-11-22T01:44:53 | 2020-04-03T15:42:31 | CSS | UTF-8 | Python | false | false | 4,117 | py | from django.shortcuts import render
from django.http import HttpResponse
from django.template import RequestContext, Template
from django.views.decorators.csrf import csrf_exempt
from django.utils.encoding import smart_str, smart_unicode
import xml.etree.ElementTree as ET
import urllib,urllib2,time,hashlib
TOKEN = "your token"
YOUDAO_KEY = your_youdao_key
YOUDAO_KEY_FROM = "your_youdao_key_from"
YOUDAO_DOC_TYPE = "xml"
@csrf_exempt
def handleRequest(request):
if request.method == 'GET':
#response = HttpResponse(request.GET['echostr'],content_type="text/plain")
response = HttpResponse(checkSignature(request),content_type="text/plain")
return response
elif request.method == 'POST':
#c = RequestContext(request,{'result':responseMsg(request)})
#t = Template('{{result}}')
#response = HttpResponse(t.render(c),content_type="application/xml")
response = HttpResponse(responseMsg(request),content_type="application/xml")
return response
else:
return None
def checkSignature(request):
global TOKEN
signature = request.GET.get("signature", None)
timestamp = request.GET.get("timestamp", None)
nonce = request.GET.get("nonce", None)
echoStr = request.GET.get("echostr",None)
token = TOKEN
tmpList = [token,timestamp,nonce]
tmpList.sort()
tmpstr = "%s%s%s" % tuple(tmpList)
tmpstr = hashlib.sha1(tmpstr).hexdigest()
if tmpstr == signature:
return echoStr
else:
return None
def responseMsg(request):
rawStr = smart_str(request.raw_post_data)
#rawStr = smart_str(request.POST['XML'])
msg = paraseMsgXml(ET.fromstring(rawStr))
queryStr = msg.get('Content','You have input nothing~')
raw_youdaoURL = "http://fanyi.youdao.com/openapi.do?keyfrom=%s&key=%s&type=data&doctype=%s&version=1.1&q=" % (YOUDAO_KEY_FROM,YOUDAO_KEY,YOUDAO_DOC_TYPE)
youdaoURL = "%s%s" % (raw_youdaoURL,urllib2.quote(queryStr))
req = urllib2.Request(url=youdaoURL)
result = urllib2.urlopen(req).read()
replyContent = paraseYouDaoXml(ET.fromstring(result))
return getReplyXml(msg,replyContent)
def paraseMsgXml(rootElem):
msg = {}
if rootElem.tag == 'xml':
for child in rootElem:
msg[child.tag] = smart_str(child.text)
return msg
def paraseYouDaoXml(rootElem):
replyContent = ''
if rootElem.tag == 'youdao-fanyi':
for child in rootElem:
# 错误码
if child.tag == 'errorCode':
if child.text == '20':
return 'too long to translate\n'
elif child.text == '30':
return 'can not be able to translate with effect\n'
elif child.text == '40':
return 'can not be able to support this language\n'
elif child.text == '50':
return 'invalid key\n'
# 查询字符串
elif child.tag == 'query':
replyContent = "%s%s\n" % (replyContent, child.text)
# 有道翻译
elif child.tag == 'translation':
replyContent = '%s%s\n%s\n' % (replyContent, '-' * 3 + u'有道翻译' + '-' * 3, child[0].text)
# 有道词典-基本词典
elif child.tag == 'basic':
replyContent = "%s%s\n" % (replyContent, '-' * 3 + u'基本词典' + '-' * 3)
for c in child:
if c.tag == 'phonetic':
replyContent = '%s%s\n' % (replyContent, c.text)
elif c.tag == 'explains':
for ex in c.findall('ex'):
replyContent = '%s%s\n' % (replyContent, ex.text)
# 有道词典-网络释义
elif child.tag == 'web':
replyContent = "%s%s\n" % (replyContent, '-' * 3 + u'网络释义' + '-' * 3)
for explain in child.findall('explain'):
for key in explain.findall('key'):
replyContent = '%s%s\n' % (replyContent, key.text)
for value in explain.findall('value'):
for ex in value.findall('ex'):
replyContent = '%s%s\n' % (replyContent, ex.text)
replyContent = '%s%s\n' % (replyContent,'--')
return replyContent
def getReplyXml(msg,replyContent):
extTpl = "<xml><ToUserName><![CDATA[%s]]></ToUserName><FromUserName><![CDATA[%s]]></FromUserName><CreateTime>%s</CreateTime><MsgType><![CDATA[%s]]></MsgType><Content><![CDATA[%s]]></Content><FuncFlag>0</FuncFlag></xml>";
extTpl = extTpl % (msg['FromUserName'],msg['ToUserName'],str(int(time.time())),'text',replyContent)
return extTpl | [
"newer027@gmail.com"
] | newer027@gmail.com |
4f3dee7c5f1e1f739b536197af999ea9e784bf3a | 01a33634195c48794ebb46bd19c785283ca7e885 | /backend/run_fast_20841/wsgi.py | 11f4f62c1c3dd85630639fc1ceb4cca1e89bca0e | [] | no_license | crowdbotics-apps/run-fast-20841 | db388a66a2590ae085db76d37ec712edb7892d0a | 7cc3a759d1399aa378903f8db7d85e2c4fcd07bc | refs/heads/master | 2022-12-23T15:50:17.562389 | 2020-09-29T19:26:58 | 2020-09-29T19:26:58 | 299,716,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | """
WSGI config for run_fast_20841 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "run_fast_20841.settings")
application = get_wsgi_application()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
9a9f3d2d56c06b6670be827785e25eb6bc99eb98 | 6a95dc7ee1c583119c892f193cd683499b50a706 | /tests/unit/fixtures/logging.py | c2cc4e6d048f32c78e16acd2ca9122ea32912417 | [
"BSD-3-Clause"
] | permissive | Steffanic/alice-jet-hadron | 735cba4d440f5f87364bf8d47147a0eccf1e1471 | 8526567935c0339cebb9ef224b09a551a0b96932 | refs/heads/master | 2022-12-28T23:02:20.560475 | 2020-10-09T08:03:55 | 2020-10-09T08:03:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 563 | py | #!/usr/bin/env python
""" Logging related fixtures to aid testing.
.. codeauthor:: Raymond Ehlers <raymond.ehlers@yale.edu>, Yale University
"""
import logging
import pytest
# Set logging level as a global variable to simplify configuration.
# This is not ideal, but fine for simple tests.
logging_level = logging.DEBUG
@pytest.fixture
def logging_mixin(caplog):
""" Logging mixin to capture logging messages from modules.
It logs at the debug level, which is probably most useful for when a test fails.
"""
caplog.set_level(logging_level)
| [
"raymond.ehlers@gmail.com"
] | raymond.ehlers@gmail.com |
c97290f97b929f33265f2e8efdbd481a678ab48b | 5d97cf2d275a0636d8ac3b98c222b6864d1c992e | /server/walt/server/threads/main/snmp/ipsetup.py | ceacf3ff17a93ca4efd81348c6a486d5e1e035d1 | [
"BSD-3-Clause"
] | permissive | ManonBillet/walt-python-packages | 51d57bf710dc6f981040b4295b8bb7811d4462e6 | b778992e241d54b684f54715d83c4aff98a01db7 | refs/heads/master | 2023-05-04T14:58:24.265660 | 2021-02-01T15:16:44 | 2021-02-01T15:16:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,432 | py | #!/usr/bin/env python
from walt.server.threads.main.snmp.mibs import load_mib
class IPSetupProxy(object):
def __init__(self, snmp_proxy):
self.snmp = snmp_proxy
load_mib(b"NETGEAR-SWITCHING-MIB")
def perform_dhcp_setup(self):
# the switch is already configured to boot using DHCP
# by default, but affecting this value again causes
# the switch to restart the DHCP procedure, which is
# exactly what we expect.
self.snmp.agentNetworkConfigProtocol = 3 # dhcp
def record_current_ip_config_as_static(self):
# if server and switches are restarted, the switches may
# send a DHCP request before the DHCP server of the WalT server
# is started.
# This causes the switches to choose a default address,
# e.g. 192.168.0.239 for Netgear switches.
# This causes major issues because several switches
# may get this same address.
# Thus, the first time a switch is detected with a DHCP IP belonging
# to the WalT network, we statically set this IP in its bootup
# procedure.
current_ip = str(self.snmp.agentNetworkIPAddress)
current_netmask = str(self.snmp.agentNetworkSubnetMask)
with self.snmp as batch:
batch.agentNetworkConfigProtocol = 1
batch.agentNetworkIPAddress = current_ip
batch.agentNetworkSubnetMask = current_netmask
| [
"etienne.duble@imag.fr"
] | etienne.duble@imag.fr |
e2da8ecd31dc1d1ec3354ba5182d031423db4939 | 753f729f33a1b00a0a7f5c78d217cc4c609aee6f | /n13_GenericViewApiAndMixin/api/serializers.py | 325212ac2364203cfaf9ee76c32a3276784d9f4a | [] | no_license | nayan-gujju/DRF-Code | 874114a861042d558112f1a8ec95daf1356d5493 | 6fb3fdd5dde352e7b6e3a7363da0e7a3057b1ede | refs/heads/master | 2023-08-06T12:42:23.551603 | 2021-10-06T11:34:54 | 2021-10-06T11:34:54 | 404,650,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 189 | py | from rest_framework import serializers
from .models import Student
class StudentSerializer(serializers.ModelSerializer):
class Meta:
model = Student
fields = '__all__'
| [
"nayangujarati007@gmail.com"
] | nayangujarati007@gmail.com |
91e81dfa4a16fc532038b1b7a075518ec9676dee | c69c3167819efdded3cdde7783514b971a98f25a | /services/parse_file.py | 100f8922a3dd36561f1abb318ace04790ec12cf3 | [] | no_license | sehovizko/sel_parser_rashodnika | 8a03ed6a3ccb4500a227848947fbb7774c7aae4c | 3510df57256e5775a55cafaf70e790196c475e21 | refs/heads/master | 2023-07-26T02:53:31.324946 | 2021-09-07T15:58:25 | 2021-09-07T15:58:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,357 | py | import ast
import re
import pandas
from db_utils import *
async def parse(file):
# try:
file_list = pandas.read_csv(file, sep=';', header=None).values.tolist()[0]
# for elem in file_list:
# print(elem)
brand = re.sub(r'/.*', '', file_list[0]).strip()
partcode = re.sub(r'(\s.+)', '', re.sub(rf'{brand}', '', file_list[2], flags=re.IGNORECASE).strip()).strip()
if 'nan' in str(file_list[2]):
name_ru = None
else:
name_ru = re.sub(f'{brand}', '', re.sub(f'{partcode}', '', str(file_list[2]), re.I), re.I).strip()
if 'nan' in str(file_list[1]):
name_en = None
else:
name_en = re.sub(r"'", '`', re.sub(r'^/|^-|\([^)]*\)', '', str(file_list[1]))).strip()
model_analogs = ast.literal_eval(file_list[3])
options = ast.literal_eval(file_list[4])
# obj = {'brand': brand, 'partcode': partcode, 'name_en': name_en, 'name_ru': name_ru,
# 'model_analogs': model_analogs, 'options': options}
return {'brand': brand, 'partcode': partcode, 'name_en': name_en, 'name_ru': name_ru,
'model_analogs': model_analogs, 'options': options}
# except Exception as err:
# print(err, file)
async def set_option(options, code):
print(code, options)
pid = get_supplies_id(code)
if pid:
for opt in options:
dic_caption_id = get_dict_partcode_option_id(opt[0])
dic_option_id = get_dict_partcode_option_id(opt[1])
if dic_caption_id and dic_option_id:
option_id = get_option_id(dic_caption_id, dic_option_id)
link_partcode_options(option_id, pid)
else:
print('nod ids for', opt)
else:
print('no id for', code)
async def set_model_analog(models, code, brand, brands):
if brand == 'HP':
# if brand == 'HP' and code == 'CE390A':
print(models, code, brand, brands)
# if brand == 'Konica Minolta':
# b_id = brands['Konica-Minolta']
# else:
# b_id = brands[brand]
# partcode_id = get_supplies_id(code)
# if partcode_id:
# for model in models:
# model_id = get_model_id(b_id, model, model.replace('-', ' '))
# print(b_id, model, model_id, partcode_id)
# link_partcode_analog(model_id, partcode_id)
| [
"server.ares@gmail.com"
] | server.ares@gmail.com |
d0626e62ea7ce7766ea90dde59f828c2367dc570 | 008ea0c503829f33840495373ad3d60794575af3 | /PYDayByDay/Tkinter_ST/Canvas_TK/Canvas2.py | 074b7463ba665c4567fe3564e8e421627ebab75a | [] | no_license | JyHu/PYStudy | 6515bea47ca6f80e336f3b6a7a14b1159fde872f | ec0855c414237bdd7d0cb28f79a81c02ccd52d45 | refs/heads/master | 2016-08-12T19:44:06.723361 | 2016-04-11T10:38:59 | 2016-04-11T10:38:59 | 45,384,810 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 950 | py | #coding=utf-8
__author__ = 'JinyouHU'
'''
创建一个矩形,指定画布的颜色为白色
'''
from Tkinter import *
root = Tk()
#创建一个Canvas,设置其背景色为白色
cv = Canvas(root, bg='white')
#创建一个矩形,坐标为(10, 10, 110, 110)
# cv.create_rectangle(10, 10, 110, 110)
cv.create_rectangle(
10,
10,
110,
110,
fill='red', #内部填充色,可选
outline='green', #外框颜色,可选
width=5, #外框宽度,可选
dash=10, #指定虚线,可选
)
cv.pack()
cv.create_rectangle(120, 10, 220, 110,
outline='red',
stipple='gray12', #使用画刷填充,使用属性stippe
fill='green'
)
#记录一个控件
rt = cv.create_rectangle(10, 120, 110, 220,
outline='red',
stipple='gray12',
fill='green'
)
cv.coords(rt,(120, 120, 220, 220)) #重新设置控件位置
root.mainloop() | [
"auu.aug@gmail.com"
] | auu.aug@gmail.com |
141c446c73f075c9846edd09f6e366d5cac0b2fb | da5ef82554c6c0413193b7c99192edd70fed58dd | /mozdns/mozbind/serial_utils.py | 1f1396c4089231f75470b24922d12641a2f23a6e | [] | no_license | rtucker-mozilla/mozilla_inventory | d643c7713c65aa870e732e18aaf19ce677e277b7 | bf9154b0d77705d8c0fe1a9a35ce9c1bd60fcbea | refs/heads/master | 2020-12-24T17:17:37.621418 | 2013-04-11T10:39:41 | 2013-04-11T10:39:41 | 2,709,399 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,226 | py | import re
import os
def get_serial(file_):
"""
Retrieve the serial number of a zone.
:param file_: The file with the SOA in it.
:type file_: file
"""
if not os.path.exists(file_):
return ''
with open(file_, 'r') as fd:
return _str_get_serial(fd)
def _str_get_serial(text):
"""Read in a zone file and find the serial number.
:param text: the zone file.
:type text: A file-ish object (StringIO or actual file descriptor)
:returns serial: The serial number
:serial: str
"""
# We already know it's in valid format.
isSOA = False
done = False
for raw_line in text.readlines():
if done:
break
line = raw_line.strip()
ll = LexLine(line)
if isSOA:
# If we made it here, this should be the serial.
serial = _lex_word(ll)
if serial.isdigit():
return serial
else:
return ''
if not line or line[0] == '$' or line[0] == ';':
continue
# name ttl class rr name-server email-addr (sn ref ret ex min)
# 1 2 3 4 5 6 7 8 9 10 11
# Everything up through 6 needs to be on the same line.
_lex_word(ll) # name
_lex_ws(ll)
c = ll.pop()
if c.isdigit():
_lex_word(ll) # ttl
_lex_ws(ll)
else:
ll.unpop()
_lex_word(ll) # class
_lex_ws(ll)
rr = _lex_word(ll)
if rr.upper() != 'SOA':
continue # It's not an soa, keep going.
isSOA = True
_lex_ws(ll)
_lex_word(ll) # ns
_lex_ws(ll)
email = _lex_word(ll) # email
if email[-1:] == '(':
_lex_ws(ll)
else:
_lex_ws(ll)
next = ll.peek()
if next == '(':
ll.pop()
# We are into the numbers.
_lex_ws(ll)
serial = _lex_word(ll)
if not serial:
# The serial must be on the next line
continue
if serial.isdigit():
return serial
else:
return ''
def _lex_word(ll):
word = ''
while True:
# Read in name
c = ll.pop()
if c is None:
if word:
return word
else:
return None
if re.match('\s', c):
ll.unpop()
break
else:
word = word + c
return word
def _lex_ws(ll):
while True:
# Read in name
c = ll.pop()
if c is None:
return
if re.match('\s', c):
continue
else:
ll.unpop()
break
return
class LexLine(object):
def __init__(self, line):
self.line = line
self.length = len(line)
self.pos = 0
def pop(self):
if self.pos == self.length:
return None
else:
c = self.line[self.pos]
self.pos += 1
return c
def unpop(self):
if self.pos > 0:
self.pos -= 1
def peek(self):
return self.line[self.pos]
| [
"uberj@onid.orst.edu"
] | uberj@onid.orst.edu |
ec31feb5138d44c92d5c755e9f0d84a7bf08bd59 | 04c824bb23b3c0ee378a5e915ab9467d5a4d4de7 | /metasub_utils/metadata/metasub_utils/metadata/metadata.py | 6ae55ec9f9c539cf553f957044cf8576e4db09f8 | [
"MIT"
] | permissive | MetaSUB/metasub_utils | a37a8eb79fabd4a922617744e91c9e3e6df2b2f8 | c52c5dde816d710db5ac8dc6f8804bb795a992e4 | refs/heads/master | 2023-01-06T11:14:20.095512 | 2020-02-24T14:23:08 | 2020-02-24T14:23:08 | 143,024,096 | 9 | 2 | MIT | 2022-12-26T20:44:28 | 2018-07-31T14:16:42 | Python | UTF-8 | Python | false | false | 1,540 | py | """Functions for handling metadata."""
import pandas as pd
from .constants import UPLOADABLE_TABLE_URL, COMPLETE_TABLE_URL, CANONICAL_CITIES_URL, IDS
def normalize_sample_name(name_in, default=None, tbl=None):
tbl = get_complete_metadata() if tbl is None else tbl
for id_type in IDS:
mytbl = tbl.query(f'{id_type} == "{name_in}"')
if mytbl.shape[0]:
return list(mytbl.index)[0]
return default
def get_complete_metadata(uploadable=False):
"""Return the complete metadata file as a pandas dataframe."""
if uploadable:
return pd.read_csv(UPLOADABLE_TABLE_URL, dtype=str, index_col=0)
return pd.read_csv(COMPLETE_TABLE_URL, dtype=str, index_col=0)
def get_canonical_city_names(lower=False):
"""Return a set of canonical city names."""
city_tbl = pd.read_csv(CANONICAL_CITIES_URL, dtype=str)
city_names = set(city_tbl.ix[:, 0])
if lower:
city_names = {city_name.lower() for city_name in city_names}
return city_names
def get_samples_from_city(city_name, project_name=None):
"""Return a list of sample names from a particular city.
If city_name is False return a list with all sample names.
"""
metadata = get_complete_metadata()
filtered = metadata
if city_name:
city_name = city_name.lower()
filtered = filtered[filtered['city'] == city_name]
if project_name:
project_name = project_name.upper()
filtered = filtered[filtered['project'] == project_name]
return list(filtered.index)
| [
"dcdanko@gmail.com"
] | dcdanko@gmail.com |
439a07cf5fa1351ea4f1de0562d17edc972dd626 | 31e8b777b8b6da1ef8d172d2c7b5271a892e7dc9 | /frappe/website/doctype/blog_settings/test_blog_settings.py | b7659d58a4901a70c3bcc5a8d2260f7f8053950c | [
"MIT"
] | permissive | Anurag810/frappe | a4d2f6f3a14cc600cced7146a02303cd1cb347f0 | 620cad18d60f090f5f9c13a5eefb56e86615de06 | refs/heads/develop | 2021-09-28T03:57:02.456172 | 2021-09-07T06:05:46 | 2021-09-07T06:05:46 | 157,325,015 | 5 | 0 | MIT | 2019-09-11T09:20:20 | 2018-11-13T05:25:01 | Python | UTF-8 | Python | false | false | 193 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Frappe Technologies and Contributors
# License: MIT. See LICENSE
# import frappe
import unittest
class TestBlogSettings(unittest.TestCase):
pass
| [
"scm.mymail@gmail.com"
] | scm.mymail@gmail.com |
5e370b0535189e817d1e7ce86559aebc8a85e11f | cc0e5613f1532e9922269530057970eb4f320a1a | /tests/test_concurrency/test_mainloopscheduler/py3_asyncioscheduler.py | 096256bf95b6a49bfd0616a63e23d81f3913478b | [
"Apache-2.0"
] | permissive | Huskyeder/RxPY | 9e353e20f850ce8e031bacafa91187ff2d0d83e4 | 8060b9ef79d2fe6654c0265860af6e8829524131 | refs/heads/master | 2021-01-15T10:18:31.831559 | 2015-04-15T04:34:05 | 2015-04-15T04:34:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,028 | py | try:
import asyncio
except ImportError:
raise SkipTest("asyncio not available")
import unittest
from datetime import datetime, timedelta
from time import sleep
from rx.concurrency import AsyncIOScheduler
class TestAsyncIOScheduler(unittest.TestCase):
def test_asyncio_schedule_now(self):
loop = asyncio.get_event_loop()
scheduler = AsyncIOScheduler(loop)
res = scheduler.now() - datetime.now()
assert(res < timedelta(seconds=1))
def test_asyncio_schedule_action(self):
loop = asyncio.get_event_loop()
@asyncio.coroutine
def go():
scheduler = AsyncIOScheduler(loop)
ran = False
def action(scheduler, state):
nonlocal ran
ran = True
scheduler.schedule(action)
yield from asyncio.sleep(0.1, loop=loop)
assert(ran == True)
loop.run_until_complete(go())
def test_asyncio_schedule_action_due(self):
loop = asyncio.get_event_loop()
@asyncio.coroutine
def go():
scheduler = AsyncIOScheduler(loop)
starttime = loop.time()
endtime = None
def action(scheduler, state):
nonlocal endtime
endtime = loop.time()
scheduler.schedule_relative(0.2, action)
yield from asyncio.sleep(0.3, loop=loop)
diff = endtime-starttime
assert(diff > 0.18)
loop.run_until_complete(go())
def test_asyncio_schedule_action_cancel(self):
loop = asyncio.get_event_loop()
@asyncio.coroutine
def go():
ran = False
scheduler = AsyncIOScheduler(loop)
def action(scheduler, state):
nonlocal ran
ran = True
d = scheduler.schedule_relative(0.01, action)
d.dispose()
yield from asyncio.sleep(0.1, loop=loop)
assert(not ran)
loop.run_until_complete(go())
| [
"dag@brattli.net"
] | dag@brattli.net |
c537aa38ea4535afc9f71643e2aa07bf33963b72 | c67831f476cb530fc0c26e0bf4258ce18e986749 | /module_intent/control/serializers.py | 8f7e50f0f76fbee43f2d8511cb68a59bfd354ccf | [
"MIT"
] | permissive | cz-qq/bk-chatbot | a3ce4b86452b3de0ff35430c1c85b91d6b23a3e6 | da37fb2197142eae32158cdb5c2b658100133fff | refs/heads/master | 2023-06-05T05:48:22.083008 | 2021-06-15T10:21:30 | 2021-06-15T10:21:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,846 | py | # -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making
蓝鲸智云PaaS平台社区版 (BlueKing PaaSCommunity Edition) available.
Copyright (C) 2017-2018 THL A29 Limited,
a Tencent company. All rights reserved.
Licensed under the MIT License (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
from rest_framework.fields import JSONField
from common.constants import CHAT_BOT_TYPES
from common.constants import TASK_PLATFORM_CHOICES
from module_intent.models import Bot
from module_intent.models import ExecutionLog
from module_intent.models import Intent
from module_intent.models import Task
from module_intent.models import Utterances
class BotSerializer(serializers.ModelSerializer):
biz_id = serializers.IntegerField(required=True, label=_("业务ID"))
biz_name = serializers.CharField(required=True, label=_("业务名称"))
bot_id = serializers.CharField(required=True, label=_("业务名称"))
bot_name = serializers.CharField(required=True, label=_("业务名称"))
bot_type = serializers.ChoiceField(
required=True,
label=_("业务名称"),
choices=CHAT_BOT_TYPES,
)
class Meta:
model = Bot
fields = (
"id",
"biz_id",
"biz_name",
"bot_id",
"bot_name",
"bot_type",
"created_by",
"created_at",
"updated_at",
)
class IntentSerializer(serializers.ModelSerializer):
biz_id = serializers.IntegerField(required=True, label=_("业务ID"))
index_id = serializers.IntegerField(required=True, label=_("索引ID"))
intent_name = serializers.CharField(required=True, label=_("技能名称"))
status = serializers.BooleanField(required=True, label=_("意图状态"))
available_user = JSONField(required=True, label=_("可执行用户"))
available_group = JSONField(required=True, label=_("可执行群组"))
is_commit = serializers.BooleanField(required=True, label=_("执行确认"))
class Meta:
model = Intent
fields = "__all__"
class UtterancesSerializer(serializers.ModelSerializer):
biz_id = serializers.IntegerField(required=True, label=_("业务ID"))
index_id = serializers.IntegerField(required=True, label=_("索引ID"))
content = JSONField(required=True, label=_("语料列表"))
class Meta:
model = Utterances
fields = "__all__"
class TaskSerializer(serializers.ModelSerializer):
biz_id = serializers.IntegerField(required=True, label=_("业务ID"))
index_id = serializers.IntegerField(required=True, label=_("索引ID"))
platform = serializers.ChoiceField(
required=True,
label=_("平台名称"),
choices=TASK_PLATFORM_CHOICES,
)
task_id = serializers.CharField(required=True, label=_("任务ID"))
activities = JSONField(required=True, label=_("节点信息"))
slots = JSONField(required=True, label=_("槽位信息"))
source = JSONField(required=True, label=_("任务元数据"))
script = JSONField(required=True, label=_("执行脚本信息"))
class Meta:
model = Task
fields = "__all__"
class ExecutionLogSerializer(serializers.ModelSerializer):
class Meta:
model = ExecutionLog
fields = "__all__"
| [
"123@qq.com"
] | 123@qq.com |
8c8f5f6ce7c4d6b47f77b25521a3c866fb059012 | e74e89592d8a3b1a0b465a7b1595708b224362d2 | /pset_classes/dogs/p4.py | 8919df926c4f0fbbe8a11a8e4b7b3ff001f07552 | [
"MIT"
] | permissive | mottaquikarim/pydev-psets | 016f60f1e9d9a534bd9a66ecde8eb412beee37d1 | 9749e0d216ee0a5c586d0d3013ef481cc21dee27 | refs/heads/master | 2023-01-10T11:15:57.041287 | 2021-06-07T23:38:34 | 2021-06-07T23:38:34 | 178,547,933 | 5 | 2 | MIT | 2023-01-03T22:28:27 | 2019-03-30T11:09:08 | Jupyter Notebook | UTF-8 | Python | false | false | 638 | py | """
Dogs IV - Tricks (CHALLENGE!)
"""
# Many dogs know how to do common tricks or follow common commands. You could create methods for each trick/command in the Dog parent class, but the problem is that not all dogs know all tricks/commands.
# However, it would be inefficient to define a custom set of instance methods for tricks/commands every time you instantiate a unique Collie (or SiberianHuskey or Pekingese etc.).
# Find an efficient way to specify which tricks each unique dog knows and to call them. You can use "roll_over", "fetch", "shake_hands", and "spin". Secondly, find a way to teach a dog new trick from this set.
| [
"jgarreffa112@gmail.com"
] | jgarreffa112@gmail.com |
ebfbac8ad5f1f89f5043471096bbff8170a8ac5e | 13556b5ff9d000b707e089f0c1be5451f20fe3fb | /stocks/settings.py | 1dec11126b357d8893492c6129e22229600f1195 | [] | no_license | LeoKnox/Django_Stock | 35a693bb1765e95a6c32b0d8ce622f226bd25ae8 | ac0e93af2be3047d1212909a3587c59b4be81dca | refs/heads/master | 2022-12-08T12:34:34.588551 | 2020-08-30T22:26:35 | 2020-08-30T22:26:35 | 290,670,535 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,295 | py | """
Django settings for DjangoStock project.
Generated by 'django-admin startproject' using Django 3.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve(strict=True).parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '4c$zue)#x8%bhz3)6+5z$1roh%=nj0h7c0+h61yx4t1gl(vv_c'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'stocks',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'DjangoStock.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'DjangoStock.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
] | [
"noreply@github.com"
] | LeoKnox.noreply@github.com |
b89ba760610de6bfde2b410bd653af63fb1cb307 | 9edaf93c833ba90ae9a903aa3c44c407a7e55198 | /netex/models/charging_moment_ref_structure.py | 50e10eb3a30889d5ce8a8ee36e95e4a2106992f6 | [] | no_license | tefra/xsdata-samples | c50aab4828b8c7c4448dbdab9c67d1ebc519e292 | ef027fe02e6a075d8ed676c86a80e9647d944571 | refs/heads/main | 2023-08-14T10:31:12.152696 | 2023-07-25T18:01:22 | 2023-07-25T18:01:22 | 222,543,692 | 6 | 1 | null | 2023-06-25T07:21:04 | 2019-11-18T21:00:37 | Python | UTF-8 | Python | false | false | 229 | py | from dataclasses import dataclass
from .type_of_value_ref_structure import TypeOfValueRefStructure
__NAMESPACE__ = "http://www.netex.org.uk/netex"
@dataclass
class ChargingMomentRefStructure(TypeOfValueRefStructure):
    """Typed reference to a NeTEx CHARGING MOMENT value.

    Appears to be auto-generated (xsdata-samples); it adds no fields
    beyond TypeOfValueRefStructure and exists only as a distinct type.
    """
    pass
| [
"chris@komposta.net"
] | chris@komposta.net |
d63b165689c63f2123a8b40265384ae8db8134c2 | c43a113f55687ccb38591e42ce729b6de87cc244 | /every_election/apps/elections/migrations/0046_update_status.py | 365892e62950a09e62dd7ba56d668dc5994cafe3 | [] | permissive | DemocracyClub/EveryElection | 9428d00bf725e02c21acd60c7125f6704fcf998a | cbcedc2b236a9287c8272f2596aae3f7a03cf45c | refs/heads/master | 2023-08-23T23:41:13.664059 | 2023-08-21T15:35:03 | 2023-08-21T15:35:03 | 70,236,537 | 11 | 12 | BSD-3-Clause | 2023-09-07T07:54:19 | 2016-10-07T10:22:20 | Python | UTF-8 | Python | false | false | 546 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Data migration: mark every pre-existing election as 'approved'."""
    dependencies = [("elections", "0045_auto_20181001_1437")]
    operations = [
        # assume all elections that already exist are approved
        # (reverse_sql restores the previous default of 'suggested')
        migrations.RunSQL(
            """
            UPDATE elections_election SET suggested_status='approved'
            """,
            reverse_sql="""
            UPDATE elections_election SET suggested_status='suggested'
            """,
        )
    ]
| [
"chris.shaw480@gmail.com"
] | chris.shaw480@gmail.com |
7059aebf40a5fdc792be9c88d86ab6e4b8bd4650 | 55d560fe6678a3edc9232ef14de8fafd7b7ece12 | /tools/build/test/resolution.py | 3af66b46daf757644cc94491ecadab6b821e9fce | [
"BSL-1.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | stardog-union/boost | ec3abeeef1b45389228df031bf25b470d3d123c5 | caa4a540db892caa92e5346e0094c63dea51cbfb | refs/heads/stardog/develop | 2021-06-25T02:15:10.697006 | 2020-11-17T19:50:35 | 2020-11-17T19:50:35 | 148,681,713 | 0 | 0 | BSL-1.0 | 2020-11-17T19:50:36 | 2018-09-13T18:38:54 | C++ | UTF-8 | Python | false | false | 924 | py | #!/usr/bin/python
# Copyright (C) Vladimir Prus 2006.
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# Tests for the target id resolution process.
import BoostBuild
# Create a temporary working directory.
t = BoostBuild.Tester(use_test_config=False)
# Create the needed files
t.write("jamroot.jam", """\
exe hello : hello.cpp ;
# This should use the 'hello' target, even if there is a 'hello' file in the
# current dir.
install s : hello : <location>. ;
""")
t.write("hello.cpp", "int main() {}\n")
# First build compiles hello.cpp into hello.obj.
t.run_build_system()
t.expect_addition("bin/$toolset/debug*/hello.obj")
# Touch the source so building 's' must re-resolve the 'hello' target id.
t.touch("hello.cpp")
t.run_build_system(["s"])
# If 'hello' in the 's' target resolved to file in the current dir, nothing
# will be rebuilt.
t.expect_touch("bin/$toolset/debug*/hello.obj")
t.cleanup()
| [
"james.pack@stardog.com"
] | james.pack@stardog.com |
d6d3f58daf810bd24bdb5ca0a4ad0a20d4a1425f | 00b405a49ac6108d24986243c4b52fa53fb58acc | /0591_tag_validator.py | fb66855422bdf55713952f38d524a0a35966598e | [] | no_license | Shin-jay7/LeetCode | 0325983fff95bfbc43a528812582cbf9b7c0c2f2 | 953b0b19764744753f01c661da969bdab6521504 | refs/heads/master | 2023-07-19T07:17:21.513531 | 2023-07-15T06:05:06 | 2023-07-15T06:05:06 | 231,285,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,969 | py | from __future__ import annotations
import re
class Solution:
    """LeetCode 591 (Tag Validator) via repeated regex reduction."""

    # CDATA sections (non-greedy) and the literal reduction marker 't'
    # are both neutralised to '-' before any tag collapsing happens.
    _CDATA_OR_T = re.compile(r'<!\[CDATA\[.*?\]\]>|t')
    # A fully-closed tag with a 1-9 uppercase name and tag-free content.
    _CLOSED_TAG = re.compile(r'<([A-Z]{1,9})>[^<]*</\1>')

    def isValid(self, code: str) -> bool:
        """Return True if `code` reduces to exactly one closed tag tree."""
        reduced = self._CDATA_OR_T.sub('-', code)
        while True:
            collapsed = self._CLOSED_TAG.sub('t', reduced)
            if collapsed == reduced:
                break
            reduced = collapsed
        return reduced == 't'
class Solution:
    """LeetCode 591 solved with an explicit character state machine
    instead of regexes (alternative to the class above)."""
    def isValid(self, code: str) -> bool:
        """Return True if `code` is exactly one well-formed closed tag.

        States: "plain" (between tags), "open"/"close" (inside a tag
        name), "cdata" (inside <![CDATA[ ... ]]>); `stack` holds the
        names of the currently open tags.
        """
        # state_machine = ["plain", "open", "close", "cdata"]
        curr = "plain"
        stack, open_tag, close_tag = [], [], []
        idx = 0
        while idx < len(code):
            char = code[idx]
            if curr == "plain":
                if not stack and idx != 0:
                    # text past index 0 is not wrapped in a closed tag
                    return False
                if code[idx:idx+9] == "<![CDATA[":
                    curr = "cdata"
                    idx += 9
                    continue
                elif code[idx:idx+2] == '</':
                    curr = 'close'
                    idx += 2
                    continue
                elif char == '<':
                    curr = "open"
            elif curr == "open":
                if char == '>':
                    if len(open_tag) > 9 or len(open_tag) < 1:
                        # open tag name length not valid (must be 1..9)
                        return False
                    stack.append("".join(open_tag))
                    open_tag = []
                    curr = 'plain'
                    idx += 1
                    continue
                if not char.isupper():
                    # open tag is not upper
                    return False
                open_tag.append(char)
            elif curr == 'close':
                if char == '>':
                    if len(close_tag) > 9 or len(close_tag) < 1:
                        # close tag name length not valid
                        return False
                    close_tag_str = "".join(close_tag)
                    if not stack or close_tag_str != stack[-1]:
                        # tag does not match the most recent open tag
                        return False
                    else:
                        stack.pop()
                    close_tag = []
                    curr = 'plain'
                    idx += 1
                    continue
                if not char.isupper():
                    # close tag is not upper
                    return False
                close_tag.append(char)
            elif curr == "cdata":
                if code[idx:idx+3] == ']]>':
                    idx += 3
                    curr = "plain"
                    continue
            idx += 1
        # valid only if every tag was closed and we ended outside CDATA/tags
        if stack or curr != "plain":
            return False
        return True
# Ad-hoc smoke checks; return values are discarded, expected results noted.
test = Solution()
test.isValid("<DIV>This is the first line <![CDATA[<div>]]></DIV>") # True
# test = Solution()
# test.isValid("<DIV>>> ![cdata[]] <![CDATA[<div>]>]]>]]>>]</DIV>") # True
# test = Solution()
# test.isValid("<A> <B> </A> </B>") # False
| [
"shin@jay7.net"
] | shin@jay7.net |
0928bcf7842c1df47ae48a1e23aa21d7bdac7f51 | afb16c3188bf06af65ae0d998e114c72342bd8be | /note/demo/pydantic_demo/dict2model.py | 5ab2e3aeae2f1224a3cf061a5f8cff325e4a6eb9 | [] | no_license | onsunsl/onsunsl.github.io | aa75f399f1c647bc2e62314633bfe35187e59ad4 | 4ed2b1b9a2407afcbffdf304020d42b81c4c8cdc | refs/heads/master | 2023-05-26T12:33:11.167270 | 2023-04-01T10:18:05 | 2023-04-01T10:18:05 | 237,595,319 | 1 | 0 | null | 2023-05-23T20:13:11 | 2020-02-01T10:02:58 | Python | UTF-8 | Python | false | false | 503 | py | from typing import List
import pydantic
from note.demo.pydantic_demo.my_list import GenericList
class A(pydantic.BaseModel):
    """Minimal pydantic model with a single string field."""
    a: str
class ListA(GenericList[A]):
    """Concrete list-of-A specialisation of the project's GenericList."""
    pass
class B(pydantic.BaseModel):
    """Container model: the same payload typed as List[A] vs ListA."""
    a1: List[A] = pydantic.Field(default_factory=ListA)
    a2: ListA = pydantic.Field(default_factory=ListA)
    b: str
# Demo: parse nested dicts into B and show repr vs dict() output.
b = B.parse_obj(dict(b="123",
                     a1=[dict(a="aa1"), dict(a="aa2")],
                     a2=[dict(a="aa1"), dict(a="aa2")]))
print(b)
print(b.dict())
| [
"onsunsl@foxmail.com"
] | onsunsl@foxmail.com |
918f9042356ae2bdde7f3ab106d057dac1da5860 | b47b530ced544ec4180b2e8ddc8d3bff4b8b97ba | /141/e.py | b50432cbaefa1e3817ff064c5f2575cc055a9294 | [] | no_license | shionhonda/AtCoder | 1069f272490c45d60945a86392642d434a44ee52 | afd254d569505ee38ba3307d0e0e7437fca40814 | refs/heads/master | 2020-03-28T08:00:20.937794 | 2019-11-16T14:30:26 | 2019-11-16T14:30:26 | 147,939,937 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 556 | py | N = int(input())
S = input()
def z_algorithm(s):
    """Return the Z-array of *s*.

    A[i] is the length of the longest common prefix of s and s[i:],
    with A[0] fixed at 0 (the convention the caller below relies on).

    Fixes over the previous version:
    * no longer raises IndexError on an empty string (`A[0] = 0` on an
      empty list did);
    * drops the dead `i += k` (a `for` loop rebinds i every iteration,
      so the statement had no effect).
    Standard O(len(s)) two-pointer implementation.
    """
    n = len(s)
    A = [0] * n
    left = right = 0  # [left, right) is the rightmost Z-box found so far
    for i in range(1, n):
        if i < right:
            # Reuse prefix information from inside the current Z-box.
            A[i] = min(right - i, A[i - left])
        while i + A[i] < n and s[A[i]] == s[i + A[i]]:
            A[i] += 1
        if i + A[i] > right:
            left, right = i, i + A[i]
    return A
def main():
    """Print the length of the longest substring of S that also occurs
    later in S (max Z-value over all suffixes; O(N^2) overall)."""
    ans = 0
    for i in range(0, N):
        tmp = max(z_algorithm(S[i:]))
        # print(tmp)
        ans = max(ans, tmp)
    # NOTE(review): if overlapping occurrences are disallowed (ABC141-E
    # style), the Z-value should be capped at the distance between the
    # two occurrences — confirm against the problem statement.
    print(ans)
main()
| [
"26x.orc.ed5.1hs@gmail.com"
] | 26x.orc.ed5.1hs@gmail.com |
b6c5e83d08aa1892f5291581dd3e2a97d2f4a9e1 | fa4b2b4ce915b4e58737f65efe7d18d1f45cbe27 | /accounts/admin.py | 602d8c011ad760f251464f14af4f49a61ca8a121 | [] | no_license | Wishez/cosmeticsyou-v2.0 | 0fde09158944415b2471cb07dcf1e2cd1df85923 | a0f6a1b11622cb36a5084781ad35f4eed2778f66 | refs/heads/master | 2022-12-26T12:47:53.693887 | 2020-10-12T20:22:30 | 2020-10-12T20:27:54 | 293,092,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,864 | py | # -*- coding: utf-8 -*-
from django.contrib import admin
from myadmin.admin import admin_site
from django.contrib.admin import DateFieldListFilter
from .models import *
from rangefilter.filter import DateRangeFilter
class ConsultantAdmin(admin.ModelAdmin):
    """Admin configuration for consultants (registered below for User
    and RefferalConsultant).  Section titles in `fieldsets` are
    user-facing Russian labels."""
    list_per_page = 10
    list_display = ('last_name', 'first_name', 'consultant_num', 'phone_number', 'email', 'status', 'refferal_url', 'url_to_personal_room',)
    # date_hierarchy = 'last_name'
    list_filter = ('status', 'last_name', 'first_name', 'middle_name', 'citizenship', 'city', 'region', ('registered_date', DateRangeFilter),)
    filter_horizontal = ('user_lead', 'user_lead_1', 'user_lead_2',)
    search_fields = (
        'last_name',
        'first_name',
        'middle_name',
        'city',
        'region',
        'consultant_num',
        'passport_data',
        'birthday',
        'street',
        'num_home',
        'num_apartment',
        'email',
        'phone_number',
        'user_led',
        'user_led_1',
        'user_led_2',
    )
    fieldsets = (
        ('Персональные данные', {
            'fields': (
                ('email',),
                ('last_name',),
                ('first_name',),
                ('middle_name',),
                # ('passport_data',),
                ('birthday',),
                ('phone_number',),
                ('citizenship',),
            ),
        },),
        ('Адрес', {
            'fields': (
                ('region',),
                ('city',),
                ('street',),
                ('num_home',),
                ('num_apartment',),
            ),
        },),
        ('Технические данные', {
            'fields': (
                ('consultant_num', 'status',),
            ),
        },),
        ('Рферальные данные', {
            'fields': (
                ('refferal_url','url_to_personal_room',),
                ('user_led', 'user_led_1', 'user_led_2',),
            ),
        },),
        ('Списки рефералов консультанта', {
            'fields': (
                ('user_lead',), ('user_lead_1',), ('user_lead_2',),
            ),
        },),
    )
class RelatedConsultantAdmin(admin.ModelAdmin):
    """Slimmer admin for RelatedConsultant: fewer columns/filters than
    ConsultantAdmin, no address section.
    NOTE(review): 'Персональная данные' below looks like a typo for
    'Персональные данные' — it is a user-facing label, left unchanged."""
    list_per_page = 10
    list_display = ('last_name', 'first_name', 'middle_name', 'consultant_num', 'refferal_url', 'url_to_personal_room', 'email',)
    list_filter = ('last_name', 'first_name', 'middle_name', 'consultant_num', ('registered_date', DateRangeFilter))
    filter_horizontal = ('user_lead', 'user_lead_1', 'user_lead_2',)
    search_fields = (
        'last_name',
        'first_name',
        'middle_name',
        'email',
        'user_led',
        'user_led_1',
        'user_led_2',
    )
    fieldsets = (
        ('Персональная данные', {
            'fields': (
                ('email',),
                ('last_name',),
                ('first_name',),
            ),
        },),
        ('Технические данные', {
            'fields': (
                ('consultant_num', 'status',),
            ),
        },),
        ('Рферальные данные', {
            'fields': (
                ('refferal_url', 'url_to_personal_room',),
                ('user_led', 'user_led_1', 'user_led_2',),
            ),
        },),
        ('Списки рефералов консультанта', {
            'fields': (
                ('user_lead',), ('user_lead_1',), ('user_lead_2',),
            ),
        },),
    )
# Register your models here.
# User and RefferalConsultant share the full ConsultantAdmin layout;
# RelatedConsultant gets the reduced RelatedConsultantAdmin.
admin_site.register(User, ConsultantAdmin)
admin_site.register(RefferalConsultant, ConsultantAdmin)
admin_site.register(RelatedConsultant, RelatedConsultantAdmin)
"shiningfinger@list.ru"
] | shiningfinger@list.ru |
b0672f06262344af3f8c8023edd3a85cf64b28f9 | 185f30795be9a8fec6539fe17753fb909e258e4c | /ljy_03函数/ljy_sum1.py | bf763d93261c09589c8b2f04f0bb367167e231b2 | [] | no_license | OPBrother/LearningPython | bd375430ce013abd9a4279f60e5f9457e965bdf7 | 9d264acb269a6191f7ec49abba25c98002f4fcd1 | refs/heads/main | 2023-03-31T06:47:43.071370 | 2021-04-12T07:09:16 | 2021-04-12T07:09:16 | 350,307,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 227 | py | def sum_num(num1=1, num2=2):
    # global num1  (commented out: both values are plain parameters,
    # global num2   no module-level state is read or written)
    """
    Simple addition of two numbers.
    :param num1: first addend, defaults to 1
    :param num2: second addend, defaults to 2
    :return: num1 + num2
    """
    return num1 + num2
result = sum_num()  # with the default arguments this is 1 + 2
print('%d' % result)
| [
"2276720277@qq.com"
] | 2276720277@qq.com |
889924e6e4c0a39b0451cb1799c08c5073a212ac | 48832d27da16256ee62c364add45f21b968ee669 | /res_bw/scripts/common/lib/encodings/utf_32.py | e64e23103e1b600efc009acbda30ea9639b13392 | [] | no_license | webiumsk/WOT-0.9.15.1 | 0752d5bbd7c6fafdd7f714af939ae7bcf654faf7 | 17ca3550fef25e430534d079876a14fbbcccb9b4 | refs/heads/master | 2021-01-20T18:24:10.349144 | 2016-08-04T18:08:34 | 2016-08-04T18:08:34 | 64,955,694 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 4,801 | py | # 2016.08.04 19:59:25 Střední Evropa (letní čas)
# Embedded file name: scripts/common/Lib/encodings/utf_32.py
"""
Python 'utf-32' Codec
"""
import codecs, sys
encode = codecs.utf_32_encode
def decode(input, errors = 'strict'):
    """Decode *input* as UTF-32 and return (text, bytes_consumed); the
    final=True flag makes a trailing incomplete sequence an error."""
    return codecs.utf_32_decode(input, errors, True)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental UTF-32 encoder: the first chunk is written with a
    native-order BOM, later chunks with the explicit LE/BE encoder.
    (Decompiled source — the bare `return` statements are artifacts.)"""
    def __init__(self, errors = 'strict'):
        codecs.IncrementalEncoder.__init__(self, errors)
        # None means "BOM not emitted yet".
        self.encoder = None
        return
    def encode(self, input, final = False):
        if self.encoder is None:
            # First call: utf_32_encode prepends the native-order BOM,
            # then we lock in the matching byte-order-specific encoder.
            result = codecs.utf_32_encode(input, self.errors)[0]
            if sys.byteorder == 'little':
                self.encoder = codecs.utf_32_le_encode
            else:
                self.encoder = codecs.utf_32_be_encode
            return result
        else:
            return self.encoder(input, self.errors)[0]
    def reset(self):
        codecs.IncrementalEncoder.reset(self)
        self.encoder = None
        return
    def getstate(self):
        # State 2 = BOM still pending, 0 = BOM already emitted.
        if self.encoder is None:
            return 2
        else:
            return 0
    def setstate(self, state):
        # Non-zero restores the "BOM pending" state; zero re-selects the
        # native-order encoder.
        if state:
            self.encoder = None
        elif sys.byteorder == 'little':
            self.encoder = codecs.utf_32_le_encode
        else:
            self.encoder = codecs.utf_32_be_encode
        return
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
    """Incremental UTF-32 decoder: sniffs the BOM on the first chunk,
    then sticks with the detected byte order."""
    def __init__(self, errors = 'strict'):
        codecs.BufferedIncrementalDecoder.__init__(self, errors)
        # None until the BOM has been seen.
        self.decoder = None
        return
    def _buffer_decode(self, input, errors, final):
        if self.decoder is None:
            # utf_32_ex_decode reports the byte order it detected:
            # -1 = little-endian BOM, +1 = big-endian BOM, 0 = unknown.
            output, consumed, byteorder = codecs.utf_32_ex_decode(input, errors, 0, final)
            if byteorder == -1:
                self.decoder = codecs.utf_32_le_decode
            elif byteorder == 1:
                self.decoder = codecs.utf_32_be_decode
            elif consumed >= 4:
                # Enough bytes for a BOM were read but none was found.
                raise UnicodeError('UTF-32 stream does not start with BOM')
            return (output, consumed)
        else:
            return self.decoder(input, self.errors, final)
    def reset(self):
        codecs.BufferedIncrementalDecoder.reset(self)
        self.decoder = None
        return
    def getstate(self):
        # Second element: 2 = BOM pending, else whether the locked
        # decoder differs from the native byte order (0 or 1).
        state = codecs.BufferedIncrementalDecoder.getstate(self)[0]
        if self.decoder is None:
            return (state, 2)
        else:
            addstate = int((sys.byteorder == 'big') != (self.decoder is codecs.utf_32_be_decode))
            return (state, addstate)
    def setstate(self, state):
        codecs.BufferedIncrementalDecoder.setstate(self, state)
        state = state[1]
        if state == 0:
            self.decoder = codecs.utf_32_be_decode if sys.byteorder == 'big' else codecs.utf_32_le_decode
        elif state == 1:
            self.decoder = codecs.utf_32_le_decode if sys.byteorder == 'big' else codecs.utf_32_be_decode
        else:
            self.decoder = None
        return
class StreamWriter(codecs.StreamWriter):
    """Stream writer that emits a native-order BOM on the first write,
    then uses the matching LE/BE encoder for the rest of the stream."""
    def __init__(self, stream, errors = 'strict'):
        self.encoder = None
        codecs.StreamWriter.__init__(self, stream, errors)
        return
    def reset(self):
        codecs.StreamWriter.reset(self)
        self.encoder = None
        return
    def encode(self, input, errors = 'strict'):
        if self.encoder is None:
            # First write: BOM included, then lock the byte order.
            result = codecs.utf_32_encode(input, errors)
            if sys.byteorder == 'little':
                self.encoder = codecs.utf_32_le_encode
            else:
                self.encoder = codecs.utf_32_be_encode
            return result
        else:
            return self.encoder(input, errors)
        # Unreachable; decompiler artifact.
        return
class StreamReader(codecs.StreamReader):
    """Stream reader that sniffs the BOM once, then shadows its own
    `decode` method with the byte-order-specific decoder.
    NOTE: uses Python-2-only `raise Exc, msg` syntax — this module is
    Py2-only as written (also `object` shadows the builtin)."""
    def reset(self):
        codecs.StreamReader.reset(self)
        try:
            # Drop the instance-level decode override installed below so
            # the next read re-sniffs the BOM.
            del self.decode
        except AttributeError:
            pass
    def decode(self, input, errors = 'strict'):
        object, consumed, byteorder = codecs.utf_32_ex_decode(input, errors, 0, False)
        if byteorder == -1:
            self.decode = codecs.utf_32_le_decode
        elif byteorder == 1:
            self.decode = codecs.utf_32_be_decode
        elif consumed >= 4:
            raise UnicodeError, 'UTF-32 stream does not start with BOM'
        return (object, consumed)
def getregentry():
    """Return the CodecInfo record the codecs registry uses for 'utf-32'."""
    return codecs.CodecInfo(name='utf-32', encode=encode, decode=decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter)
# okay decompyling c:\Users\PC\wotsources\files\originals\res_bw\scripts\common\lib\encodings\utf_32.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.08.04 19:59:25 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
fd4490a8aea18addd987c1bc80c11bdd94fd8553 | 9c2ba4f1a2d75b1916e6f20fa95c5fb32d0497d9 | /ScrapingWithPython2/code/myScrapy/todayMovie/todayMovie/spiders/wuHanMovieSpider.py | 1ea663d1da0a6cb675492cc3b984b1b928fedbd1 | [] | no_license | PowerDG/DgCoreInit | abe4b15e38b730c25424f71e6927db982af27a72 | 84e6b7833ddc083b90fcc172c3812dd6f8b51e3d | refs/heads/master | 2023-07-19T11:58:09.220460 | 2019-06-07T14:43:24 | 2019-06-07T14:43:24 | 163,091,619 | 0 | 1 | null | 2023-07-06T21:20:15 | 2018-12-25T14:59:42 | Jupyter Notebook | UTF-8 | Python | false | false | 1,203 | py | # -*- coding: utf-8 -*-
import scrapy
import re
import sys
import os
from todayMovie.items import TodaymovieItem
class WuhanmoviespiderSpider(scrapy.Spider):
    """Scrapy spider scraping today's movies for Wuhan cinemas from
    mtime.com out of an inline <script> payload."""
    name = 'wuHanMovieSpider'
    allowed_domains = ['mtime.com']
    # start_urls = ['http://mtime.com/']
    # def parse(self, response):
    #     pass
    start_urls = ['http://theater.mtime.com/China_Hubei_Province_Wuhan_Wuchang/4316/']
    # Landing page for Wuhan (Wuchang) cinemas.
    def parse(self, response):
        """Extract the embedded "movies" JS array and yield one
        TodaymovieItem per movie dict."""
        # `response` is the downloaded page; pick the third <script>
        # under <body> (XPath indices are 1-based) which holds the data.
        selector = response.xpath('/html/body/script[3]/text()')[0].extract()
        # print(selector)
        moviesStr = re.search('"movies":\[.*?\]', selector).group()
        moviesList = re.findall('{.*?}', moviesStr)
        items = []
        for movie in moviesList:
            # SECURITY(review): eval() executes scraped text as Python —
            # prefer json.loads/ast.literal_eval on untrusted input.
            mDic = eval(movie)
            item = TodaymovieItem()
            item['movieTitleCn'] = mDic.get('movieTitleCn')
            item['movieTitleEn'] = mDic.get('movieTitleEn')
            item['director'] = mDic.get('director')
            item['runtime'] = mDic.get('runtime')
            items.append(item)
        # print(items.count())
        return items
| [
"1049365046@qq.com"
] | 1049365046@qq.com |
292fd601726ed3c66db99db9c9ace19128c64869 | c50fb310d8c52284be2c636f951de796eededae9 | /63.py | 190f91e0abdb366118d5410628ca971438ae7a7f | [] | no_license | Deepakdk7/Playerset3 | 6f46f638f22d894b9cc93d81b27c221f9dcdaad3 | 636e1feed0f97bbc9e9495a5dbb81a512ed980c5 | refs/heads/master | 2020-06-03T07:35:23.203780 | 2019-08-06T08:56:16 | 2019-08-06T08:56:16 | 191,497,095 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | ax=int(input())
# `ax` (read above) is the declared length of both input lists.
c = []
a = list(map(int, input().split()))
b = list(map(int, input().split()))
# O(n) membership via sets instead of the previous nested index loops
# plus list `not in` (O(n^3) overall).  Output is unchanged: elements
# of `a` that also occur in `b`, de-duplicated, in `a`'s order, printed
# space-separated with no trailing newline.
present_in_b = set(b[:ax])
seen = set()
for value in a[:ax]:
    if value in present_in_b and value not in seen:
        seen.add(value)
        c.append(value)
for value in c:
    print(value, "", end="")
| [
"noreply@github.com"
] | Deepakdk7.noreply@github.com |
48070dfb8e7b5182733e50c6e889c4a56c5e1a2f | ed269e9a4d9d6bfbb833381b7aef65a23f391fe2 | /比赛/1438. 绝对差不超过限制的最长连续子数组.py | 0793980db1ebafedb598454840907d61b8b7e099 | [] | no_license | Comyn-Echo/leeCode | fcff0d4c4c10209a47bd7c3204e3f64565674c91 | 67e9daecb7ffd8f7bcb2f120ad892498b1219327 | refs/heads/master | 2023-04-28T17:35:52.963069 | 2021-05-19T01:52:16 | 2021-05-19T01:52:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 915 | py | class Solution(object):
def longestSubarray(self, nums, limit):
if not nums:
return 0
curr_max = nums[0] # 当子数组下最大值 这里初始化为第一个数
curr_min = nums[0] # 当子数组下最大值 这里初始化为第一个数
sub_nums = [] # 以数组作为窗口滑动
for num in nums:
if abs(num - curr_max) <= limit and abs(num - curr_min) <= limit and abs(curr_max - curr_min) <= limit:
curr_max = max(num,curr_max)
curr_min = min(num,curr_min)
sub_nums.append(num)
else:
sub_nums.append(num)
sub_nums.pop(0)
curr_max = max(sub_nums) # 当子数组最大值
curr_min = min(sub_nums) # 当前子数组最小值
return len(sub_nums)
Solution.longestSubarray(None,nums = [8,2,4,7], limit = 4) | [
"2892211452aa@gmail.com"
] | 2892211452aa@gmail.com |
070cddb15ffb012a7b728cd8e739baf89d2f0b4b | a99a44aee5cfc5e080f6d83d2bcc1c3d273a3426 | /scripts/ingestors/rwis/process_traffic.py | 1a9456b736a38d76f8f227d31d692573117d3685 | [
"MIT"
] | permissive | ragesah/iem | 1513929c8bc7f254048271d61b4c4cf27a5731d7 | 8ed970d426bddeaa3e7ded593665d22f0f9f6e87 | refs/heads/main | 2023-08-20T20:01:15.480833 | 2021-10-12T15:44:52 | 2021-10-12T15:44:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,657 | py | """Ingest Iowa DOT RWIS data provided by DTN.
called from RUN_10_AFTER.sh
"""
import datetime
from pyiem.network import Table as NetworkTable
from pyiem.util import get_properties, get_dbconn, utc, logger
import pandas as pd
import requests
# Module-level singletons: script logger, shared 'iem' DB connection,
# and the Iowa RWIS station metadata table.
LOG = logger()
DBCONN = get_dbconn("iem")
NT = NetworkTable("IA_RWIS")
def load_metadata():
    """Load up what we know about these traffic sites.

    Returns a dict mapping "<location_id>_<lane_id>" to the database
    sensor_id for each known RWIS traffic sensor.
    """
    meta = {}
    cur = DBCONN.cursor()
    cur.execute(
        "SELECT location_id, lane_id, sensor_id from rwis_traffic_meta"
    )
    rows = cur.fetchall()
    cur.close()
    for row in rows:
        key = f"{row[0]}_{row[1]}"
        meta[key] = row[2]
    return meta
def create_sensor(cursor, key, row, meta):
    """Insert a new sensor (plus its placeholder data row) and cache its
    database id in `meta` under `key`.

    `row` is a DataFrame row with stationId (e.g. "IA001"), sensorId
    and sensorName fields; the "IA" prefix is stripped to get the
    numeric location_id.
    """
    cursor.execute(
        "INSERT into rwis_traffic_sensors(location_id, lane_id, name) "
        "VALUES (%s, %s, %s) RETURNING id",
        (
            row["stationId"].replace("IA", ""),
            row["sensorId"],
            row["sensorName"],
        ),
    )
    sensor_id = cursor.fetchone()[0]
    LOG.info(
        "Adding RWIS Traffic Sensor: %s Lane: %s Name: %s DB_SENSOR_ID: %s",
        row["stationId"],
        row["sensorId"],
        row["sensorName"],
        sensor_id,
    )
    meta[key] = sensor_id
    # Seed the one-row-per-sensor data table that process() UPDATEs.
    cursor.execute(
        "INSERT into rwis_traffic_data(sensor_id) VALUES (%s)", (sensor_id,)
    )
def process(cursor, df, meta):
    """Update each sensor's current-observation row from DataFrame `df`,
    creating sensors on first sight.  Rows older than the stored `valid`
    timestamp are ignored by the WHERE clause."""
    rows = []
    for _, row in df.iterrows():
        data = dict(row)
        if "stationId" not in data:
            LOG.info("hit data quirk with row %s", row)
            continue
        key = f"{int(row['stationId'].replace('IA', ''))}_{row['sensorId']}"
        if key not in meta:
            create_sensor(cursor, key, row, meta)
        data["sensor_id"] = meta[key]
        rows.append(data)
    # Other fields available but currently unused:
    # 'volume',
    # 'occupancy',
    # 'normalLength', 'longLength', 'unclassifiedLength', 'qcFailures'
    cursor.executemany(
        "UPDATE rwis_traffic_data SET valid = %(utcTime)s, "
        "avg_speed = %(avgSpeed)s, normal_vol = %(normalLength)s, "
        "long_vol = %(longLength)s, occupancy = %(occupancy)s "
        "WHERE sensor_id = %(sensor_id)s and valid < %(utcTime)s",
        rows,
    )
def main():
    """Go Main Go.

    Fetch the last four hours of DTN traffic observations for every
    Iowa RWIS station with a remote_id and store them, committing once
    per station.
    """
    # prevent a clock drift issue
    ets = utc() - datetime.timedelta(minutes=1)
    sts = ets - datetime.timedelta(hours=4)
    edate = ets.strftime("%Y-%m-%dT%H:%M:%SZ")
    sdate = sts.strftime("%Y-%m-%dT%H:%M:%SZ")
    meta = load_metadata()
    props = get_properties()
    apikey = props["dtn.apikey"]
    headers = {"accept": "application/json", "apikey": apikey}
    for nwsli in NT.sts:
        idot_id = NT.sts[nwsli]["remote_id"]
        if idot_id is None:
            continue
        URI = (
            f"https://api.dtn.com/weather/stations/IA{idot_id:03}/"
            f"traffic-observations?startDate={sdate}"
            f"&endDate={edate}&units=us&precision=0"
        )
        req = requests.get(URI, timeout=60, headers=headers)
        if req.status_code != 200:
            # HACK: only log failures for low ids; presumably ids >= 73
            # are known to be absent upstream — confirm with maintainer.
            if idot_id < 73:
                LOG.info("Fetch %s got status_code %s", URI, req.status_code)
            continue
        res = req.json()
        if not res:
            continue
        try:
            df = pd.DataFrame(res)
        except Exception as exp:
            LOG.info(
                "DataFrame construction failed with %s\n res: %s", exp, res
            )
            continue
        cursor = DBCONN.cursor()
        process(cursor, df, meta)
        cursor.close()
        DBCONN.commit()
# Script entry point (invoked from RUN_10_AFTER.sh per the module doc).
if __name__ == "__main__":
    main()
| [
"akrherz@iastate.edu"
] | akrherz@iastate.edu |
206383854ad7ed2c4a4c906be142b4f1f5a53f0a | 2b8c88dfee5c5a784357515eafe8cd5f997c8774 | /learn_ppdai/learn_sqlite.py | 5117e136ead3ef8863fa6945912a78c800468fea | [] | no_license | archenRen/learnpy | e060f3aa2f77c35fc1b12345720af6c8b528da57 | 934ef76b97297f746a722a48c76672c7bc744cd9 | refs/heads/master | 2022-04-28T20:25:59.114036 | 2020-05-03T02:16:03 | 2020-05-03T02:16:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 462 | py | import pandas as pd
from sqlalchemy import create_engine
import json
import sys
# Pick the SQLite file per platform (Windows dev box vs. local copy).
if sys.platform == 'win32':
    path = r'C:\Users\wangdi03\Downloads\history.db'
else:
    path = './history.db'
engine = create_engine('sqlite:///' + path)
df = pd.read_sql_query(
    """
    select * from usermmvs
    """,
    engine
)
# NOTE: Python 2 print statement below — this script is Py2-only.
for var in df['variablejson']:
    dic = json.loads(var)
    print dic['userid'], dic['pc_credit_edu']
print('ok')
| [
"wangdi03@ppdai.com"
] | wangdi03@ppdai.com |
fc294056e5978d3fb4a4b61abe28a0ba09e92183 | 2d82d4c6574bd6d32f2cf1c781615f7951f55f66 | /muntjac/demo/sampler/features/windows/SubwindowModal.py | 17e9f816cadf2ee1565cb1e6c0cf44512e3c4d13 | [
"Apache-2.0"
] | permissive | metaperl/muntjac | f83f745ee03942a61af92ee7fba7285aa9c46f3c | 8db97712edd81b4d25deaaa48587d2a08010f2c8 | refs/heads/master | 2021-01-15T22:04:25.057862 | 2012-11-09T03:52:59 | 2012-11-09T03:52:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,122 | py |
from muntjac.demo.sampler.NamedExternalResource import NamedExternalResource
from muntjac.demo.sampler.APIResource import APIResource
from muntjac.demo.sampler.Feature import Feature, Version
from muntjac.ui.window import Window
class SubwindowModal(Feature):
    """Sampler feature entry describing modal sub-windows."""

    def getSinceVersion(self):
        return Version.OLD

    def getName(self):
        return 'Modal window'

    def getDescription(self):
        description = ('A <i>modal window</i> blocks access to the rest of the '
                'application until the window is closed (or made non-modal).<br/>'
                'Use modal windows when the user must finish the task in the '
                'window before continuing.')
        return description

    def getRelatedAPI(self):
        return [APIResource(Window)]

    def getRelatedFeatures(self):
        # Imported lazily, presumably to avoid a circular import with
        # FeatureSet — confirm before hoisting to module level.
        from muntjac.demo.sampler.features.windows.Subwindow import Subwindow
        from muntjac.demo.sampler.FeatureSet import Windows
        return [Subwindow, Windows]

    def getRelatedResources(self):
        wiki = NamedExternalResource('Wikipedia: Modal window',
                'http://en.wikipedia.org/wiki/Modal_window')
        return [wiki]
| [
"r.w.lincoln@gmail.com"
] | r.w.lincoln@gmail.com |
4197cdda503cccf0d608f684b3a945810598daaa | 8e2b2aa7d7405ed351072874d75e947619379cdb | /src/billing/migrations/0006_charge.py | 0f9995f46654d802c83ca1ba6337ad9b30e6248d | [] | no_license | hoanguyen-rozer/learn-django-ecommerce--2hand | f0bc11b4331fae6d060e24c29a5293170342ff2b | b7c67e6a5703edfe922d519f576d7d87f16a7dba | refs/heads/master | 2022-06-14T10:13:17.800193 | 2020-05-05T09:49:17 | 2020-05-05T09:49:17 | 256,304,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,150 | py | # Generated by Django 3.0.5 on 2020-04-25 07:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the billing.Charge model mirroring Stripe charge fields."""
    dependencies = [
        ('billing', '0005_card_default'),
    ]
    operations = [
        migrations.CreateModel(
            name='Charge',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('stripe_id', models.CharField(max_length=120)),
                ('paid', models.BooleanField(default=False)),
                ('refunded', models.BooleanField(default=False)),
                ('outcome', models.TextField(blank=True, null=True)),
                ('outcome_type', models.CharField(blank=True, max_length=120, null=True)),
                ('seller_message', models.CharField(blank=True, max_length=120, null=True)),
                ('risk_level', models.CharField(blank=True, max_length=120, null=True)),
                ('billing_profile', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='billing.BillingProfile')),
            ],
        ),
    ]
| [
"phuchoa099@gmail.com"
] | phuchoa099@gmail.com |
2794a116155b8d4c376b9759d4623ae07de36c4b | 14a1312dfb7c4d5e2b76f49b0837cc024f5a1295 | /python/gate/background/digester.py | dd893310ce429b07bb65d21afd9209bd468b288b | [] | no_license | bropony/gamit | b3a493c55407efa83ae20286b1e624b280b46494 | 47811e2cfe67c3c0de4c4be7394dd30e48732799 | refs/heads/master | 2020-05-17T01:51:38.887194 | 2015-11-05T12:57:13 | 2015-11-05T12:57:13 | 36,106,737 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,620 | py | """
@author: mahanzhou
@date: 8/6/15
@file:
@desc:
"""
from gamit.log.logger import Logger
from gamit.utils.myuuid import MyUuid
from gamit.mongodb.database import MongoDatabase
from message.db.systemcommand import ESysCommandType
from message.db.mongodb.posttables import TSysTopic
import json
from social.systopicmanager import SysTopicManager
class __SystemCommandDigester:
    """Dispatches queued system commands to their handlers; exposed via
    the module-level SystemCommandDigester singleton below."""
    def digest(self, systemCommand):
        """
        Handle one system command; AddCommercialAd is a known no-op and
        unknown types are logged and ignored.
        :type systemCommand: message.db.mongodb.utiltables.TSystemCommand
        :rtype: bool
        """
        operStatus = False
        if systemCommand.commandType == ESysCommandType.AddSysTopic:
            operStatus = self.__addSysTopic(systemCommand)
        elif systemCommand.commandType == ESysCommandType.AddCommercialAd:
            pass
        else:
            Logger.logInfo("__SystemCommandDigester.digest: undigested command type:", systemCommand.commandType)
        return operStatus
    def __addSysTopic(self, scmd):
        """
        Build a TSysTopic from the command's JSON payload, persist it and
        announce it to SysTopicManager.
        :type scmd: message.db.mongodb.utiltables.TSystemCommand
        :rtype: bool
        """
        # NOTE(review): the second positional arg to json.loads was the
        # Py2 `encoding` parameter (removed in Python 3.9) — this code
        # presumably targets Python 2; confirm before upgrading.
        jsTopic = json.loads(scmd.stringVal, "UTF8")
        tsysTopic = TSysTopic()
        tsysTopic._fromJson(jsTopic)
        tsysTopic.topicId = MyUuid.getUuid()
        tb = MongoDatabase.findTableByMessageType(TSysTopic)
        if not tb:
            Logger.logInfo("__SystemCommandDigester.__addSysTopic. Table not found: ", TSysTopic.__name__)
            # Bare return -> None, which digest() treats as failure.
            return
        tb.save(tsysTopic)
        SysTopicManager.addNewSysTopics([tsysTopic])
        return True
SystemCommandDigester = __SystemCommandDigester()
| [
"ahda@qq.com"
] | ahda@qq.com |
e8a2cf0901af06fe8256b304d58206bea59f42a6 | 1e8142725aa06844713d18fa38c6779aff8f8171 | /tndata_backend/goals/migrations/0047_auto_20150531_1646.py | 252f0df2ecd3c85d51b521c3169888dc1e1bf6e7 | [
"MIT"
] | permissive | tndatacommons/tndata_backend | 8f4db3e5cf5272901c9087a85e21d7560240bb3b | 3d22179c581ab3da18900483930d5ecc0a5fca73 | refs/heads/master | 2020-12-03T07:53:17.339769 | 2017-03-27T06:18:58 | 2017-03-27T06:18:58 | 68,407,220 | 1 | 2 | null | 2017-03-27T06:18:59 | 2016-09-16T18:59:16 | Python | UTF-8 | Python | false | false | 472 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Constrain BehaviorProgress.status to the three progress choices."""
    dependencies = [
        ('goals', '0046_behaviorprogress_goalprogress'),
    ]
    operations = [
        migrations.AlterField(
            model_name='behaviorprogress',
            name='status',
            field=models.IntegerField(choices=[(1, 'Off Course'), (2, 'Seeking'), (3, 'On Course')]),
        ),
    ]
| [
"brad@bradmontgomery.net"
] | brad@bradmontgomery.net |
40bd087b309f78f9719a323572d870a3bf279dc9 | d860a2c1fa8fffc76a9101e4f91cecc80c27e802 | /leetcode/859_Buddy_Strings.py | 9d27eb3c71441e478d0179fbd66d1bd7781c8c35 | [] | no_license | heroming/algorithm | 80ea8f00ac049b0bc815140253568484e49c39e3 | 18e510f02bff92bc45cceb7090a79fbd40c209ec | refs/heads/master | 2021-01-19T01:27:31.676356 | 2019-06-09T08:51:16 | 2019-06-09T08:51:16 | 62,952,889 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 664 | py | class Solution(object):
def buddyStrings(self, s, t) :
if len(s) != len(t) :
return False
dif = []
for i in xrange(len(s)) :
if s[i] != t[i] :
if len(dif) >= 2 :
return False
dif.append(i)
if len(dif) == 2 :
a, b = dif[0], dif[1]
return s[a] == t[b] and s[b] == t[a]
elif len(dif) == 0 :
dic = {}
for c in s :
if c in dic :
return True
else :
dic[c] = True
return False
else :
return False
| [
"heroming7788@gmail.com"
] | heroming7788@gmail.com |
8bbde69a27066cd21cc393c9b6560021a483176e | 99249dad36df26a712ae8d900041d53acf3901ea | /test/ps_delay_test-3.3.py | cb4c41fb0c53441f4683cf1318517a72829878b5 | [
"MIT"
] | permissive | bopopescu/Lauecollect | f1f79c2cc5ff106df0dedbd6939ec92630d2b305 | 60ae2b05ea8596ba0decf426e37aeaca0bc8b6be | refs/heads/master | 2022-11-29T00:40:28.384831 | 2019-06-05T01:21:36 | 2019-06-05T01:21:36 | 280,989,300 | 0 | 0 | MIT | 2020-07-20T02:03:22 | 2020-07-20T02:03:22 | null | UTF-8 | Python | false | false | 2,748 | py | """Delay line linearity characterization
Friedrich Schotte, Jul 22, 2015 - Apr 20, 2015
Setup:
Ramsay-100B RF Generator, 351.93398 MHz +10 dBm -> FPGA RF IN
FPGA 1: X-scope trig -> CH1, DC50, 500 mV/div
FPGA 13: ps L oscill -> DC block -> 90-MHz low-pass -> CH2, DC50, 500 mV/div
Timebase 5 ns/div
Measurement P1 CH2, time@level, Absolute, 0, Slope Pos, Gate Start 4.5 div,
Stop 5.5 div
Waiting time: 97.8 ms
"""
__version__ = "3.3"
from instrumentation import timing_system,timing_sequencer,lecroy_scope
from timing_sequence import lxd,Sequence
from scan import rscan,timescan as tscan
from sleep import sleep
from numpy import arange
# Scope measurement P2 (time@level per the module doc) and the full
# dial range / step count of the clock-shift delay line.
delay = lecroy_scope().measurement(2)
tmax = timing_system.clk_shift.max_dial
nsteps = (timing_system.clk_shift.max_count+1)
class Clk_shift_count(object):
    """Property-style adapter exposing timing_system.clk_shift.count so
    scans can record/set the raw register count as a motor-like value."""
    name = "clk_shift.count"
    def get_value(self): return timing_system.clk_shift.count
    def set_value(self,value): timing_system.clk_shift.count = value
    value = property(get_value,set_value)
# Shared instance passed to rscan() below.
clk_shift_count = Clk_shift_count()
def scan():
    """Scan the laser-to-X-ray delay (lxd) over 5 bunch-clock periods
    while sliding the scope gate the opposite way; logs to
    logfiles/scan.log."""
    delay = lecroy_scope().measurement(1)
    tmax = 5*timing_system.bct
    nsteps = tmax/timing_system.clk_shift.stepsize
    lxd.value = 0
    data = rscan([lxd,delay.gate.start,delay.gate.stop],0,[tmax,-tmax,-tmax],
        nsteps,[clk_shift_count,delay],averaging_time=10.0,logfile="logfiles/scan.log")
def scan_delayline():
    """Characterize the clock-shift delay line over its full dial range
    with the sequencer stopped; logs to logfiles/scan_delayline.log."""
    delay = lecroy_scope().measurement(2)
    tmax = timing_system.clk_shift.max_dial
    nsteps = tmax/timing_system.clk_shift.stepsize
    timing_sequencer.running = False
    timing_system.xosct.enable.count = 1
    timing_system.clk_shift.dial = 0
    data = rscan([timing_system.clk_shift,delay.gate.start,delay.gate.stop],
        [0,0,0],[tmax,tmax,tmax],nsteps,[clk_shift_count,delay],
        averaging_time=10.0,logfile="logfiles/scan_delayline.log")
def timescan():
    """Record the delay measurement as a function of time (drift check),
    logging to logfiles/timescan.log."""
    data = tscan(delay,averaging_time=10.0,logfile="logfiles/timescan.log")
def register_counts():
    """Return (pso, clk_shift) register count lists for 50 laser delays
    spanning [0, tmax).

    NOTE(review): the indices [1][16][0] and [1][17][0] select specific
    registers out of Sequence.register_counts -- confirm the register
    ordering against the timing_sequence module.
    """
    trange = arange(0,tmax,tmax/50)
    pso = [Sequence(ps_lxd=t).register_counts[1][16][0] for t in trange]
    clk_shift = [Sequence(ps_lxd=t).register_counts[1][17][0] for t in trange]
    return pso,clk_shift
def reset_dcm():
    """Pulse the DCM reset line: assert the reset bit for 0.2 s, then release."""
    timing_system.clk_shift_reset.count = 1
    sleep(0.2)
    timing_system.clk_shift_reset.count = 0
def peridiocally_reset_dcm(wait_time=60):
    """Reset the DCM every `wait_time` seconds until interrupted.

    On Ctrl-C (KeyboardInterrupt) the reset line is released before
    returning. (Function name kept as-is, sic, for existing callers.)
    """
    try:
        while True:
            reset_dcm()
            sleep(wait_time)
    except KeyboardInterrupt:
        # Make sure the reset bit is not left asserted.
        timing_system.clk_shift_reset.count = 0
if __name__ == "__main__":
    # Interactive usage: show configured instrument addresses and the
    # commands one would typically paste into the interpreter.
    print('timing_system.ip_address = %r' % timing_system.ip_address)
    print('lecroy_scope().ip_address = %r' % lecroy_scope().ip_address)
    print('scan_delayline()')
    print('scan()')
| [
"friedrich.schotte@gmail.com"
] | friedrich.schotte@gmail.com |
cb51a14333b36709a28fef261b38acdb99cd369c | da3b03b013c00450dff6686b02d71209b3c6a271 | /pannier/pannier/tests.py | d9125299877aacf20c5513ae83a56aa624289f2f | [] | no_license | domestique/pannier | cf425e30bb4dd1999169a2375b9e2b2da599a376 | b92d40cc3e9a2523962d39895e58b9e1f7f36ae2 | refs/heads/master | 2021-01-20T19:14:16.201440 | 2016-12-22T20:46:11 | 2016-12-22T20:46:11 | 63,989,018 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,270 | py | import os
from mock import patch
from django.core import mail
from django.test import TestCase, override_settings
from django.core.urlresolvers import reverse
from pannier import forms, models
# Absolute paths used to locate fixture files that live next to this module.
CUR_DIR = os.path.dirname(os.path.realpath(__file__))
TEST_DIR = os.path.join(CUR_DIR, 'test_data')
class BaseCase(TestCase):
    """Shared TestCase base with a small HTTP status-code helper."""

    def assertStatusCode(self, response, status_code=200):
        """Assert that `response` carries the expected HTTP status code."""
        actual = response.status_code
        self.assertEqual(actual, status_code)
class TestLeadModel(BaseCase):
    """Unit tests for the Lead model's derived/display helpers."""

    def _create_lead(self, lead_details=None):
        """Build a Lead via the model factory, using fixed defaults for any
        field not supplied in `lead_details`."""
        lead_details = lead_details if lead_details else {}
        return models.Lead.create_lead(
            first_name=lead_details.get('first_name', 'Klea'),
            last_name=lead_details.get('last_name', 'Ridley'),
            company_name=lead_details.get('company_name', 'Domestique Studios'),
            domain_name=lead_details.get('domain_name', 'domestique'),
            email_address=lead_details.get('email_address', 'support@domestiquestudios.com'),
            phone_number=lead_details.get('phone_number', '123-123-1234'),
            team_size=lead_details.get('team_size', '1-10'),
        )
    def test_full_name(self):
        # full_name concatenates first and last name with a space.
        lead = self._create_lead()
        self.assertEqual(lead.full_name, 'Klea Ridley')
    def test__str(self):
        # __str__ renders as "Lead: <full name>".
        lead = self._create_lead()
        self.assertEqual(lead.__str__(), 'Lead: {}'.format(lead.full_name))
class TestPannierViews(BaseCase):
    """Integration tests for the lead-signup views and the Docker webhook."""

    def test_lead_create_get(self):
        # GET renders the empty LeadForm.
        response = self.client.get(reverse('lead-create'))
        self.assertStatusCode(response, 200)
        self.assertTrue(
            isinstance(response.context['form'], forms.LeadForm),
        )
        self.assertTemplateUsed('base.html')
        self.assertTemplateUsed('lead.html')
    def test_lead_create(self):
        # A valid POST persists the Lead, redirects to the thanks page and
        # sends one notification email.
        response = self.client.post(reverse('lead-create'), {
            'first_name': 'Pat',
            'last_name': 'Patterson',
            'company_name': 'Patty Cakes',
            'domain_name': 'itspat',
            'email_address': 'pat@pattycakes.com',
            'phone_number': '321-321-4321',
            'team_size': '10-30',
        })
        self.assertStatusCode(response, 302)
        self.assertRedirects(response, reverse('thanks'))
        lead = models.Lead.objects.get(first_name='Pat')
        self.assertEqual(lead.last_name, 'Patterson')
        self.assertEqual(lead.company_name, 'Patty Cakes')
        self.assertEqual(lead.domain_name, 'itspat')
        self.assertEqual(lead.email_address, 'pat@pattycakes.com')
        self.assertEqual(lead.phone_number, '321-321-4321')
        self.assertEqual(lead.team_size, '10-30')
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].subject, 'New Invite Signup!')
    def test_lead_create_error(self):
        # An invalid team_size re-renders the form with a field error.
        response = self.client.post(reverse('lead-create'), {
            'first_name': 'Pat',
            'last_name': 'Patterson',
            'company_name': 'Patty Cakes',
            'domain_name': 'itspat',
            'email_address': 'pat@pattycakes.com',
            'phone_number': '321-321-4321',
            'team_size': 'BAD TEAM SIZE',
        })
        self.assertStatusCode(response, 200)
        self.assertTemplateUsed('base.html')
        self.assertTemplateUsed('lead.html')
        form = response.context['form']
        self.assertEqual(
            form.errors['team_size'],
            ['Select a valid choice. BAD TEAM SIZE is not one of the available choices.']
        )
    def test_thanks_get(self):
        response = self.client.get(reverse('thanks'))
        self.assertStatusCode(response, 200)
        self.assertTemplateUsed('base.html')
        self.assertTemplateUsed('thanks.html')
    @override_settings(PANNIER_WORKSPACE='/home/workspace/')
    @patch('pannier.views.call')
    def test_docker_webhook(self, call_mock):
        # Posting the Docker Hub payload triggers the tag script in the
        # configured workspace and sends a notification email.
        json_data = open(os.path.join(TEST_DIR, 'docker_hook.json')).read()
        response = self.client.post(reverse('docker'), content_type='application/json', data=json_data)
        self.assertStatusCode(response, 200)
        call_mock.assert_called_with(
            './tag_new_version.sh', shell=True, cwd='/home/workspace/'
        )
        self.assertEqual(len(mail.outbox), 1)
| [
"f4nt@f4ntasmic.com"
] | f4nt@f4ntasmic.com |
44fed3a3ec344dc97f314b02c3c8e824d51ba32f | 4be5c172c84e04c35677f5a327ab0ba592849676 | /python/snippets/unpacking_args_kwargs.py | a7209cfedb745c049a9ad5658e6f9531d6481c2d | [] | no_license | niranjan-nagaraju/Development | 3a16b547b030182867b7a44ac96a878c14058016 | d193ae12863971ac48a5ec9c0b35bfdf53b473b5 | refs/heads/master | 2023-04-06T20:42:57.882882 | 2023-03-31T18:38:40 | 2023-03-31T18:38:40 | 889,620 | 9 | 2 | null | 2019-05-27T17:00:29 | 2010-09-05T15:58:46 | Python | UTF-8 | Python | false | false | 660 | py | #!/usr/local/bin/python3
'''
Test program to test *args and **kwargs unpacking in function calls
'''
def f1(a, b, c):
    """Return the sum (or concatenation) of the three arguments."""
    total = a + b
    total = total + c
    return total
def f2(a):
    """Return the argument increased by ten."""
    increment = 10
    return a + increment
def f3():
    """Return the constant 100 (takes no arguments)."""
    result = 100
    return result
def handler(fn, *args, **kwargs):
    """Invoke `fn`, forwarding all positional and keyword arguments unchanged,
    and return whatever it returns."""
    result = fn(*args, **kwargs)
    return result
if __name__ == '__main__':
    # Plain calls routed through the generic handler.
    assert(handler(f1, 1, 2, 3) == 6)
    assert(handler(f2, 3) == 13)
    assert(handler(f3) == 100)
    # Unpacking tuples and lists with * at the call site.
    t = (2,3,5)
    assert((f1(*t) == 10))
    p = [6,4,3]
    assert(f1(*p) == 13)
    assert(f2(*[1]) == 11)
    assert(f3(*[]) == 100)
    # Keyword arguments and ** dict unpacking (works for strings too).
    assert(handler(f1, c="3", b="2", a="1") == "123")
    d = {'a': 1, 'c': 4, 'b': 2}
    assert(handler(f1, **d) == 7)
    x = {'a': 1}
    assert(handler(f2, **x) == 11)
| [
"vinithepooh@gmail.com"
] | vinithepooh@gmail.com |
90f1114b75076faa067987410e79c0e2fca5b744 | 1fb87c2038ea178ab8b7d600da6a105ccd35b44a | /ucscsdk/mometa/gl/GlVlan.py | 6fade91aaf3af0da4d65621f4af627cdba227649 | [
"Apache-2.0"
] | permissive | hrupprecht/ucscsdk | 72fe255dfb2d68b620b52793eae38e4d1b1ed7e7 | 1a62184548300ad1071780a2519c60552f0a21a2 | refs/heads/master | 2020-09-28T23:30:23.074800 | 2019-12-17T08:20:28 | 2019-12-17T08:20:28 | 226,891,866 | 0 | 0 | NOASSERTION | 2019-12-09T14:32:53 | 2019-12-09T14:32:52 | null | UTF-8 | Python | false | false | 6,814 | py | """This module contains the general information for GlVlan ManagedObject."""
from ...ucscmo import ManagedObject
from ...ucsccoremeta import UcscVersion, MoPropertyMeta, MoMeta
from ...ucscmeta import VersionMeta
class GlVlanConsts():
    """Enumerated string constants for GlVlan property values.

    Auto-generated from the UCS Central meta model; the literal values must
    match the API exactly and should not be edited by hand.
    """
    IF_ROLE_DIAG = "diag"
    IF_ROLE_FCOE_NAS_STORAGE = "fcoe-nas-storage"
    IF_ROLE_FCOE_STORAGE = "fcoe-storage"
    IF_ROLE_FCOE_UPLINK = "fcoe-uplink"
    IF_ROLE_MGMT = "mgmt"
    IF_ROLE_MONITOR = "monitor"
    IF_ROLE_NAS_STORAGE = "nas-storage"
    IF_ROLE_NETWORK = "network"
    IF_ROLE_NETWORK_FCOE_UPLINK = "network-fcoe-uplink"
    IF_ROLE_SERVER = "server"
    IF_ROLE_SERVICE = "service"
    IF_ROLE_STORAGE = "storage"
    IF_ROLE_UNKNOWN = "unknown"
    OPER_STATE_CONFLICT = "Conflict"
    OPER_STATE_CONFLICT_RESOLVED = "ConflictResolved"
    OPER_STATE_FAILED_TO_GLOBALIZE = "FailedToGlobalize"
    OPER_STATE_GLOBALIZED = "Globalized"
    OPER_STATE_GLOBALIZING = "Globalizing"
    OPER_STATE_NOT_CONFLICT = "NotConflict"
    OPER_STATE_NOT_EVALUATED = "NotEvaluated"
    POLICY_OWNER_LOCAL = "local"
    POLICY_OWNER_PENDING_POLICY = "pending-policy"
    POLICY_OWNER_POLICY = "policy"
    POLICY_OWNER_UNSPECIFIED = "unspecified"
    SHARING_COMMUNITY = "community"
    SHARING_ISOLATED = "isolated"
    SHARING_NONE = "none"
    SHARING_PRIMARY = "primary"
    SWITCH_ID_A = "A"
    SWITCH_ID_B = "B"
    SWITCH_ID_NONE = "NONE"
    SWITCH_ID_MGMT = "mgmt"
class GlVlan(ManagedObject):
    """Managed object for a global VLAN in UCS Central.

    Auto-generated from the UCS Central meta model: `mo_meta` describes the
    MO itself (rn pattern, parents/children, access), `prop_meta` describes
    each property (type, version, access mask, validation regex/ranges) and
    `prop_map` maps wire names to attribute names. Do not edit by hand.
    """
    consts = GlVlanConsts()
    naming_props = set([u'id'])
    # MO metadata: rn is "vlan-[id]"; parent is glVnetInvHolder.
    mo_meta = MoMeta("GlVlan", "glVlan", "vlan-[id]", VersionMeta.Version201b, "InputOutput", 0x3f, [], ["admin"], [u'glVnetInvHolder'], [u'messageEp'], [None])
    prop_meta = {
        "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version201b, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
        "context_dn": MoPropertyMeta("context_dn", "contextDn", "string", VersionMeta.Version201b, MoPropertyMeta.READ_WRITE, 0x2, 0, 256, None, [], []),
        "deploy_dn": MoPropertyMeta("deploy_dn", "deployDn", "string", VersionMeta.Version201b, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
        "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version201b, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
        "globalized_dn": MoPropertyMeta("globalized_dn", "globalizedDn", "string", VersionMeta.Version201b, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
        "id": MoPropertyMeta("id", "id", "uint", VersionMeta.Version201b, MoPropertyMeta.NAMING, 0x8, None, None, None, [], []),
        "if_role": MoPropertyMeta("if_role", "ifRole", "string", VersionMeta.Version201b, MoPropertyMeta.READ_ONLY, None, None, None, None, ["diag", "fcoe-nas-storage", "fcoe-storage", "fcoe-uplink", "mgmt", "monitor", "nas-storage", "network", "network-fcoe-uplink", "server", "service", "storage", "unknown"], []),
        "inv_dn": MoPropertyMeta("inv_dn", "invDn", "string", VersionMeta.Version201b, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
        "mcast_policy_dn": MoPropertyMeta("mcast_policy_dn", "mcastPolicyDn", "string", VersionMeta.Version201b, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
        "name": MoPropertyMeta("name", "name", "string", VersionMeta.Version201b, MoPropertyMeta.READ_ONLY, None, None, None, r"""[\-\.:_a-zA-Z0-9]{0,16}""", [], []),
        "oper_state": MoPropertyMeta("oper_state", "operState", "string", VersionMeta.Version201b, MoPropertyMeta.READ_ONLY, None, None, None, None, ["Conflict", "ConflictResolved", "FailedToGlobalize", "Globalized", "Globalizing", "NotConflict", "NotEvaluated"], []),
        "policy_class_name": MoPropertyMeta("policy_class_name", "policyClassName", "string", VersionMeta.Version201b, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
        "policy_owner": MoPropertyMeta("policy_owner", "policyOwner", "string", VersionMeta.Version201b, MoPropertyMeta.READ_ONLY, None, None, None, None, ["local", "pending-policy", "policy", "unspecified"], []),
        "pub_nw_dn": MoPropertyMeta("pub_nw_dn", "pubNwDn", "string", VersionMeta.Version201b, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
        "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version201b, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []),
        "sharing": MoPropertyMeta("sharing", "sharing", "string", VersionMeta.Version201b, MoPropertyMeta.READ_ONLY, None, None, None, None, ["community", "isolated", "none", "primary"], []),
        "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version201b, MoPropertyMeta.READ_WRITE, 0x20, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
        "switch_id": MoPropertyMeta("switch_id", "switchId", "string", VersionMeta.Version201b, MoPropertyMeta.READ_ONLY, None, None, None, None, ["A", "B", "NONE", "mgmt"], []),
        "type": MoPropertyMeta("type", "type", "string", VersionMeta.Version201b, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|unknown|lan|san|ipc),){0,4}(defaultValue|unknown|lan|san|ipc){0,1}""", [], []),
        "vnet_id": MoPropertyMeta("vnet_id", "vnetId", "uint", VersionMeta.Version201b, MoPropertyMeta.READ_ONLY, None, None, None, None, [], ["1-4093"]),
    }
    # Wire (XML attribute) name -> Python attribute name.
    prop_map = {
        "childAction": "child_action",
        "contextDn": "context_dn",
        "deployDn": "deploy_dn",
        "dn": "dn",
        "globalizedDn": "globalized_dn",
        "id": "id",
        "ifRole": "if_role",
        "invDn": "inv_dn",
        "mcastPolicyDn": "mcast_policy_dn",
        "name": "name",
        "operState": "oper_state",
        "policyClassName": "policy_class_name",
        "policyOwner": "policy_owner",
        "pubNwDn": "pub_nw_dn",
        "rn": "rn",
        "sharing": "sharing",
        "status": "status",
        "switchId": "switch_id",
        "type": "type",
        "vnetId": "vnet_id",
    }
    def __init__(self, parent_mo_or_dn, id, **kwargs):
        # `id` is the naming property; all other properties start as None
        # and may be overridden through **kwargs by the base class.
        self._dirty_mask = 0
        self.id = id
        self.child_action = None
        self.context_dn = None
        self.deploy_dn = None
        self.globalized_dn = None
        self.if_role = None
        self.inv_dn = None
        self.mcast_policy_dn = None
        self.name = None
        self.oper_state = None
        self.policy_class_name = None
        self.policy_owner = None
        self.pub_nw_dn = None
        self.sharing = None
        self.status = None
        self.switch_id = None
        self.type = None
        self.vnet_id = None
        ManagedObject.__init__(self, "GlVlan", parent_mo_or_dn, **kwargs)
| [
"paragsh@cisco.com"
] | paragsh@cisco.com |
7bfd5dfee9ff148327031ee7d52e2318c6260188 | ce6fc44470dcb5fca78cdd3349a7be70d75f2e3a | /AtCoder/Beginner 147/D.py | b54187e86e75040999da26a123dde94613b6de5d | [] | no_license | cormackikkert/competitive-programming | f3fa287fcb74248ba218ecd763f8f6df31d57424 | 3a1200b8ff9b6941c422371961a127d7be8f2e00 | refs/heads/master | 2022-12-17T02:02:40.892608 | 2020-09-20T11:47:15 | 2020-09-20T11:47:15 | 266,775,265 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | N = int(input())
BITS = 60
MOD = 10 ** 9 + 7

# ones[b] / zeros[b]: how many inputs have bit b set / clear.
ones = [0] * BITS
zeros = [0] * BITS
for value in map(int, input().split()):
    for bit in range(BITS):
        if (value >> bit) & 1:
            ones[bit] += 1
        else:
            zeros[bit] += 1

# Sum over bits of 2^bit * (#pairs whose XOR has that bit set), mod 1e9+7.
answer = 0
for bit in range(BITS):
    answer = (answer + pow(2, bit) * ones[bit] * zeros[bit]) % MOD
print(answer)
| [
"u6427001@anu.edu.au"
] | u6427001@anu.edu.au |
31184b42fe6be39f479fd65591764c7419f647dc | 7b2a3ea853dc44aea204f02abedaad6a2029f4ff | /sw4_test001.py | 7ca8ff6610e40c64e60948828f1a40e7e5490344 | [] | no_license | NoisyLeon/SW4Py | 7d45503282dc988b5f886c039706bd79fdd6b339 | 7029f18eb526bcb46b4aa244da1e088ca57a56aa | refs/heads/master | 2020-12-22T14:57:11.265397 | 2016-12-20T18:27:18 | 2016-12-20T18:27:18 | 56,792,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,394 | py | import obspy
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
# NOTE: Python 2 script (see the bare `print` statement at the end).
# Part 1: group-velocity / travel-time picks vs. distance.
infname = '/lustre/janus_scratch/life9360/sw4_working_dir_Q/field_R_200km_zmax_4km_qs_ak135_vs_2000/Tgr_10.0.txt'
inArr=np.loadtxt(infname)
plt.figure();
T=inArr[:,2]          # travel time (s)
DistArr=inArr[:,3]    # epicentral distance (km)
VgrArr=DistArr/T      # apparent group velocity
mindist=DistArr.min()
indexmin=DistArr.argmin()
plt.plot(DistArr, T,'o' );
# plt.plot(DistArr, (VgrArr-VgrArr[indexmin])/VgrArr[indexmin]*100.,'o' );
# plt.ylabel('Relative Difference in Vgr (%)');
# NOTE(review): the second plt.ylabel call below overwrites the first two
# labels -- only 'Travel time(sec)' takes effect.
plt.ylabel('Vgr(km/s)');
plt.ylabel('Travel time(sec)');
plt.xlabel('Distance(km)');
# Part 2: amplitudes vs. distance, corrected for geometrical spreading.
infname = '/lustre/janus_scratch/life9360/sw4_working_dir_Q/field_R_200km_zmax_4km_qs_ak135_vs_2000/Amp_10.0.txt'
inArr2=np.loadtxt(infname)
AmpArr=inArr2[:,2]
DistArr=inArr2[:,3]
fig, ax=plt.subplots()
mindist=DistArr.min()
indexmin=DistArr.argmin()
maxamp=AmpArr[indexmin]
# plt.plot(DistArr, AmpArr*1e9,'o' );
plt.ylabel('Amplitude');
plt.xlabel('Distance(km)')
# Spreading-corrected amplitude, normalized by the closest-station value.
CampArr=AmpArr*np.sqrt(DistArr/mindist ) /AmpArr[indexmin]
# CampArr=AmpArr*DistArr/ DistArr[0]
# plt.plot(DistArr, (CampArr-CampArr[indexmin])/CampArr[indexmin]*100.,'o' );
plt.plot(DistArr, CampArr,'o' );
# Highlight the 900-1300 km band on the amplitude plot.
y1=900
y2=1300
ax.fill_betweenx(np.array([0.2, 1.1]), y1, y2, facecolor='red', alpha=0.5)
plt.show()
# Linear fit of travel time vs. distance (slope = 1/Vgr).
slope, intercept, r_value, p_value, std_err = stats.linregress(DistArr, DistArr/VgrArr);
print slope, intercept, r_value, p_value, std_err
| [
"lili.feng@colorado.edu"
] | lili.feng@colorado.edu |
c5895fe28be33a134db7f26565c1fbe352b452f3 | a12c090eb57da4c8e1f543a1a9d497abad763ccd | /django-stubs/contrib/sessions/backends/signed_cookies.pyi | 7e91615680389abac85e9324dc87e7511fd3f27d | [
"BSD-3-Clause"
] | permissive | debuggerpk/django-stubs | be12eb6b43354a18675de3f70c491e534d065b78 | bbdaebb244bd82544553f4547157e4f694f7ae99 | refs/heads/master | 2020-04-04T08:33:52.358704 | 2018-09-26T19:32:19 | 2018-09-26T19:32:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 655 | pyi | from datetime import datetime
from typing import Any, Dict, Optional, Union
from django.contrib.sessions.backends.base import SessionBase
class SessionStore(SessionBase):
    """Stub for Django's signed-cookie session backend.

    Annotations only; the implementation lives in Django itself.
    """
    accessed: bool
    # Fixed: the original annotation referenced `Type` and
    # `django.core.signing.JSONSerializer` without importing either, which
    # raised NameError when the class body was evaluated and left the stub
    # unresolvable to type checkers.
    serializer: Type[JSONSerializer]
    def load(self) -> Dict[str, Union[datetime, str]]: ...
    modified: bool = ...
    def create(self) -> None: ...
    def save(self, must_create: bool = ...) -> None: ...
    def exists(self, session_key: Optional[str] = ...) -> bool: ...
    def delete(self, session_key: Optional[str] = ...) -> None: ...
    def cycle_key(self) -> None: ...
    @classmethod
    def clear_expired(cls) -> None: ...
| [
"maxim.kurnikov@gmail.com"
] | maxim.kurnikov@gmail.com |
95941fdc60a6774a6323af41a5f6e8115a00c243 | 2345b3388ca9322e26974b6dd06d592a3b19c6b5 | /python/sysdesign/designFileSystem.py | 9312f30309dfe901a2ba8778d9147e9cae185ec5 | [] | no_license | XifeiNi/LeetCode-Traversal | b94db963cce782dfa641ca04e70876053d53f00d | fdb6bcb4c721e03e853890dd89122f2c4196a1ea | refs/heads/master | 2021-07-23T21:07:24.562063 | 2021-07-05T16:52:12 | 2021-07-05T16:52:12 | 190,349,692 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 717 | py | class FileSystem:
    def __init__(self):
        # Maps each created absolute path (e.g. "/a/b") to its stored value.
        self.val = {}
def createPath(self, path: str, value: int) -> bool:
words = path.split('/')[1:-1]
s = ""
for word in words:
s += "/" + word
if s not in self.val:
return False
if path in self.val and self.val[path] != value:
return False
self.val[path] = value
return True
def get(self, path: str) -> int:
if path not in self.val:
return -1
return self.val[path]
# Your FileSystem object will be instantiated and called as such:
# obj = FileSystem()
# param_1 = obj.createPath(path,value)
# param_2 = obj.get(path)
| [
"cecilia990@outlook.com"
] | cecilia990@outlook.com |
2d04eb5998aa6e20f7eab5b3ced0cb89f3d537a0 | 4c2c084a57ce514ed5f41877f372c6d1426c823b | /grr/server/grr_response_server/db_signed_binaries_test.py | 42deafa44b8b3e496c3e5412cfe43d4aa9a00000 | [
"Apache-2.0"
] | permissive | 4ndygu/grr | db1a8c781f52345aa21b580b3754d41a140e27f9 | cfc725b5ee3a2626ac4cdae7fb14471612da4522 | refs/heads/master | 2020-04-18T09:09:42.076738 | 2019-01-24T19:30:55 | 2019-01-24T19:30:55 | 164,693,051 | 0 | 0 | Apache-2.0 | 2019-01-09T21:10:50 | 2019-01-08T16:48:11 | Python | UTF-8 | Python | false | false | 2,783 | py | #!/usr/bin/env python
"""Tests for signed-binary DB functionality."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from grr_response_server import db
from grr_response_server.rdfvalues import objects as rdf_objects
# Fixture binary IDs and blob-reference sets shared by the mixin's tests below.
_test_id1 = rdf_objects.SignedBinaryID(
    binary_type=rdf_objects.SignedBinaryID.BinaryType.EXECUTABLE,
    path="linux/test/hello")
_test_id2 = rdf_objects.SignedBinaryID(
    binary_type=rdf_objects.SignedBinaryID.BinaryType.PYTHON_HACK,
    path="windows/test/hello")
# Two distinct reference lists so update tests can tell them apart.
_test_references1 = rdf_objects.BlobReferences(items=[
    rdf_objects.BlobReference(offset=0, size=2, blob_id=b"\xaa" * 32),
    rdf_objects.BlobReference(offset=2, size=3, blob_id=b"\xbb" * 32),
])
_test_references2 = rdf_objects.BlobReferences(items=[
    rdf_objects.BlobReference(offset=0, size=3, blob_id=b"\xcc" * 32),
    rdf_objects.BlobReference(offset=3, size=2, blob_id=b"\xdd" * 32),
])
class DatabaseTestSignedBinariesMixin(object):
  """Mixin that adds tests for signed binary DB functionality."""

  def testReadSignedBinaryReferences(self):
    # Written references round-trip, and the stored timestamp is set.
    self.db.WriteSignedBinaryReferences(_test_id1, _test_references1)
    stored_hash_id, stored_timestamp = self.db.ReadSignedBinaryReferences(
        _test_id1)
    self.assertEqual(stored_hash_id, _test_references1)
    self.assertGreater(stored_timestamp.AsMicrosecondsSinceEpoch(), 0)
  def testUpdateSignedBinaryReferences(self):
    # A second write for the same ID replaces the references and advances
    # the timestamp.
    self.db.WriteSignedBinaryReferences(_test_id1, _test_references1)
    stored_references1, timestamp1 = self.db.ReadSignedBinaryReferences(
        _test_id1)
    self.assertEqual(stored_references1, _test_references1)
    self.db.WriteSignedBinaryReferences(_test_id1, _test_references2)
    stored_references2, timestamp2 = self.db.ReadSignedBinaryReferences(
        _test_id1)
    self.assertEqual(stored_references2, _test_references2)
    self.assertGreater(timestamp2, timestamp1)
  def testUnknownSignedBinary(self):
    # Reading an ID that was never written raises UnknownSignedBinaryError.
    with self.assertRaises(db.UnknownSignedBinaryError):
      self.db.ReadSignedBinaryReferences(_test_id1)
  def testReadIDsForAllSignedBinaries(self):
    self.db.WriteSignedBinaryReferences(_test_id1, _test_references1)
    self.db.WriteSignedBinaryReferences(_test_id2, _test_references2)
    self.assertCountEqual(self.db.ReadIDsForAllSignedBinaries(),
                          [_test_id1, _test_id2])
  def testDeleteSignedBinaryReferences(self):
    # Deletion removes the entry and is idempotent.
    self.db.WriteSignedBinaryReferences(_test_id1, _test_references1)
    self.assertNotEmpty(self.db.ReadIDsForAllSignedBinaries())
    self.db.DeleteSignedBinaryReferences(_test_id1)
    self.assertEmpty(self.db.ReadIDsForAllSignedBinaries())
    # Trying to delete again shouldn't raise.
    self.db.DeleteSignedBinaryReferences(_test_id1)
| [
"realbushman@gmail.com"
] | realbushman@gmail.com |
8e5f20a19d1a66e2a925c5334c5b378f6d25ebeb | ad13583673551857615498b9605d9dcab63bb2c3 | /output/instances/nistData/list/unsignedInt/Schema+Instance/NISTXML-SV-IV-list-unsignedInt-length-4-5.py | 16305329233288f206fde998286f6d1c62dad41b | [
"MIT"
] | permissive | tefra/xsdata-w3c-tests | 397180205a735b06170aa188f1f39451d2089815 | 081d0908382a0e0b29c8ee9caca6f1c0e36dd6db | refs/heads/main | 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 | MIT | 2023-07-25T14:19:04 | 2020-02-10T21:59:47 | Python | UTF-8 | Python | false | false | 429 | py | from output.models.nist_data.list_pkg.unsigned_int.schema_instance.nistschema_sv_iv_list_unsigned_int_length_4_xsd.nistschema_sv_iv_list_unsigned_int_length_4 import NistschemaSvIvListUnsignedIntLength4
# Auto-generated NIST schema conformance instance: a list of unsignedInt
# values exercising the length facet for this type (eight list items).
obj = NistschemaSvIvListUnsignedIntLength4(
    value=[
        4181655182,
        4161446171,
        4162425164,
        4161223171,
        4122532161,
        4171446151,
        4143223171,
        4163645122,
    ]
)
| [
"tsoulloftas@gmail.com"
] | tsoulloftas@gmail.com |
5821e591b24ac99aeb3838f49ef97c8f5476ad06 | e8404eb4d9aa8b483083823c3d7720ce0958e6ce | /practice20h.py | f2b2241bc3f258801305a19ee9cf5612d912854d | [] | no_license | harmansehmbi/Project20 | cdd0c98f23e1ad6f1adf22a2c18168b102e37fc4 | 22e43d4fb4ea33bcc18ed946cd031a171aba13df | refs/heads/master | 2020-06-12T10:30:02.893088 | 2019-06-28T12:42:28 | 2019-06-28T12:42:28 | 194,271,592 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | import matplotlib.pyplot as plt
X = list(range(1,11))
# List comprehensions building y-values for x, x^2 and x^3.
Y1 = [n for n in X]
Y2 = [n*n for n in X]
Y3 = [n*n*n for n in X]
print(X)
print(Y1)
print(Y2)
print(Y3)
plt.plot(X, Y1, label="Y1")
plt.plot(X, Y2, label="Y2")
plt.plot(X, Y3, label="Y3")
plt.legend() # How we can place a legend on different positions -> Explore
plt.xlabel("X-Axis")
plt.ylabel("Y-Axis")  # fixed: was plt.xlabel("Y-Axis"), which overwrote the x-axis label
plt.title("Polynomial Graph")
plt.grid(True)
plt.show() | [
"51370954+harmansehmbi@users.noreply.github.com"
] | 51370954+harmansehmbi@users.noreply.github.com |
18013e2b65ef9381809646e3867756c3f08eefef | b378950c5ec10db8c7921df1261d62b5b74581e8 | /gae/lib/django/tests/modeltests/many_to_one_null/models.py | fb0f6ac3b7707feb5ea92d8d875ed9300209d290 | [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | towerjoo/CS-notes | 11e78395423ec097840e354c6571400db149e807 | a5704ceea72caab2a458b0f212d69041c4c2a3ce | refs/heads/master | 2020-06-02T19:26:11.524649 | 2010-09-08T02:08:16 | 2010-09-08T02:08:16 | 895,186 | 6 | 4 | null | null | null | null | UTF-8 | Python | false | false | 3,102 | py | """
16. Many-to-one relationships that can be null
To define a many-to-one relationship that can have a null foreign key, use
``ForeignKey()`` with ``null=True`` .
"""
from django.db import models
class Reporter(models.Model):
    # Legacy Django API: `maxlength` is the pre-1.0 spelling of max_length.
    name = models.CharField(maxlength=30)
    def __str__(self):
        return self.name
class Article(models.Model):
    headline = models.CharField(maxlength=100)
    # null=True makes the FK column nullable: an Article may have no Reporter.
    reporter = models.ForeignKey(Reporter, null=True)
    class Meta:
        # Default ordering for querysets, relied on by the doctests below.
        ordering = ('headline',)
    def __str__(self):
        return self.headline
__test__ = {'API_TESTS':"""
# Create a Reporter.
>>> r = Reporter(name='John Smith')
>>> r.save()
# Create an Article.
>>> a = Article(headline="First", reporter=r)
>>> a.save()
>>> a.reporter.id
1
>>> a.reporter
<Reporter: John Smith>
# Article objects have access to their related Reporter objects.
>>> r = a.reporter
# Create an Article via the Reporter object.
>>> a2 = r.article_set.create(headline="Second")
>>> a2
<Article: Second>
>>> a2.reporter.id
1
# Reporter objects have access to their related Article objects.
>>> r.article_set.all()
[<Article: First>, <Article: Second>]
>>> r.article_set.filter(headline__startswith='Fir')
[<Article: First>]
>>> r.article_set.count()
2
# Create an Article with no Reporter by passing "reporter=None".
>>> a3 = Article(headline="Third", reporter=None)
>>> a3.save()
>>> a3.id
3
>>> print a3.reporter
None
# Need to reget a3 to refresh the cache
>>> a3 = Article.objects.get(pk=3)
>>> print a3.reporter.id
Traceback (most recent call last):
...
AttributeError: 'NoneType' object has no attribute 'id'
# Accessing an article's 'reporter' attribute returns None
# if the reporter is set to None.
>>> print a3.reporter
None
# To retrieve the articles with no reporters set, use "reporter__isnull=True".
>>> Article.objects.filter(reporter__isnull=True)
[<Article: Third>]
# Set the reporter for the Third article
>>> r.article_set.add(a3)
>>> r.article_set.all()
[<Article: First>, <Article: Second>, <Article: Third>]
# Remove an article from the set, and check that it was removed.
>>> r.article_set.remove(a3)
>>> r.article_set.all()
[<Article: First>, <Article: Second>]
>>> Article.objects.filter(reporter__isnull=True)
[<Article: Third>]
# Create another article and reporter
>>> r2 = Reporter(name='Paul Jones')
>>> r2.save()
>>> a4 = r2.article_set.create(headline='Fourth')
>>> r2.article_set.all()
[<Article: Fourth>]
# Try to remove a4 from a set it does not belong to
>>> r.article_set.remove(a4)
Traceback (most recent call last):
...
DoesNotExist: <Article: Fourth> is not related to <Reporter: John Smith>.
>>> r2.article_set.all()
[<Article: Fourth>]
# Use descriptor assignment to allocate ForeignKey. Null is legal, so
# existing members of set that are not in the assignment set are set null
>>> r2.article_set = [a2, a3]
>>> r2.article_set.all()
[<Article: Second>, <Article: Third>]
# Clear the rest of the set
>>> r.article_set.clear()
>>> r.article_set.all()
[]
>>> Article.objects.filter(reporter__isnull=True)
[<Article: First>, <Article: Fourth>]
"""}
| [
"zhutao@halfquest.com"
] | zhutao@halfquest.com |
77c436d784307c76055161cb9812c3027cfe4c00 | f43c49e41e61714617fb7442ea408485a20d81a5 | /dfirtrack_main/importer/file/markdown.py | 0ada04d8bdf823f7958a9a508c335515d871ccf4 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | PiterPentester/dfirtrack | 2408181e2058133da2cc64c43756552e9b04a532 | e6baaa580dbc9fa26b582397834c0a0855da0940 | refs/heads/master | 2020-05-31T17:25:09.023170 | 2019-05-23T19:12:15 | 2019-05-23T19:12:15 | 190,407,697 | 1 | 0 | NOASSERTION | 2020-04-10T10:15:07 | 2019-06-05T14:13:29 | HTML | UTF-8 | Python | false | false | 4,617 | py | from dateutil.parser import parse
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect, render
from dfirtrack_main.forms import EntryFileImport
from dfirtrack_main.logger.default_logger import debug_logger, warning_logger
from dfirtrack_main.models import Entry
import hashlib
from io import TextIOWrapper
@login_required(login_url="/login")
def entrys(request):
    """Import entries for a single system from an uploaded markdown table.

    POST: parse the uploaded file row by row (columns: date | UTC | system |
    type | content), skip duplicates by SHA-1 of the concatenated values,
    create an Entry per remaining row and redirect to the system page.
    GET: render the (optionally system-preselected) upload form.
    """
    # form was valid to post
    if request.method == "POST":
        # call logger
        debug_logger(str(request.user), " ENTRY_TXT_IMPORTER_BEGIN")
        # get text out of file (variable results from request object via file upload field)
        entryfile = TextIOWrapper(request.FILES['entryfile'].file, encoding=request.encoding)
        # set row counter (needed for logger)
        i = 0
        # iterate over rows in file
        for row in entryfile:
            # autoincrement row counter
            i += 1
            # skip first two lines # TODO: remove first two lines from parsing script
            if i == 1 or i == 2:
                continue
            # split line from markdown table format to single values
            column = row.split("|")
            # check row for empty value
            if len(column) < 6:
                warning_logger(str(request.user), " ENTRY_TXT_IMPORTER_SYSTEM_COLUMN " + "row_" + str(i) + ":empty_row")
                continue
            # get values of former markdown tables
            entry_date = column[1]
            entry_utc = column[2]
            entry_system = column[3]
            entry_type = column[4]
            entry_content = column[5]
            # remove trailing and leading whitespaces
            entry_date = entry_date.strip()
            entry_utc = entry_utc.strip()
            entry_system = entry_system.strip()
            entry_type = entry_type.strip()
            entry_content = entry_content.strip()
            # concatenate all relevant entry values to a string for hashing
            entry_string = entry_date + entry_utc + entry_system + entry_type + entry_content
            # calculate hash from entry values (used for de-duplication only)
            entry_hash = hashlib.sha1(entry_string.encode('utf8'))
            entry_sha1 = entry_hash.hexdigest()
            # get system_id as string from POST object
            system = request.POST['system']
            # check for existing entry_sha1 for this system and skip if it already exist
            try:
                check_entry = Entry.objects.get(system=system, entry_sha1=entry_sha1)
                warning_logger(str(request.user), " ENTRY_TXT_IMPORTER_ENTRY_EXISTS " + "row_" + str(i) + ":entry_exists")
                continue
            # NOTE(review): bare except swallows ALL errors, not just
            # Entry.DoesNotExist -- consider narrowing the exception type.
            except:
                pass
            # convert timing information to datetime object (assumes UTC)
            entry_time = parse(entry_date + " " + entry_utc + "+00:00")
            # create form with request data
            form = EntryFileImport(request.POST, request.FILES)
            # create entry
            # NOTE(review): `if form.is_valid(): pass` is a no-op -- the
            # save below runs regardless of form validity. Presumably
            # is_valid() is called for its cleaning side effect; verify.
            if form.is_valid():
                pass
            # don't save form yet
            entry = form.save(commit=False)
            # set values from file (row / column)
            entry.entry_time = entry_time
            entry.entry_sha1 = entry_sha1
            entry.entry_date = entry_date
            entry.entry_utc = entry_utc
            entry.entry_system = entry_system
            entry.entry_type = entry_type
            entry.entry_content = entry_content
            # set auto values
            entry.entry_created_by_user_id = request.user
            entry.entry_modified_by_user_id = request.user
            # save object
            entry.save()
            # call logger
            entry.logger(str(request.user), ' ENTRY_TXT_IMPORTER_EXECUTED')
        # call logger
        debug_logger(str(request.user), " ENTRY_TXT_IMPORTER_END")
        # NOTE(review): `system` is only bound inside the loop -- a file
        # with fewer than three lines raises NameError here.
        return redirect('/systems/'+ system)
    else:
        # show empty form with preselected system
        if request.method == 'GET' and 'system' in request.GET:
            system = request.GET['system']
            form = EntryFileImport(initial={
                'system': system,
            })
        else:
            # show empty form
            form = EntryFileImport()
        # call logger
        debug_logger(str(request.user), " ENTRY_TXT_IMPORTER_ENTERED")
    return render(request, 'dfirtrack_main/entry/entrys_file_importer.html', {'form': form})
| [
"mathias.stuhlmacher@gmx.de"
] | mathias.stuhlmacher@gmx.de |
7dcdf7f5efd7878a74aff1e9ef7205eedeab6d3d | 86f8bf3933208329eb73bfcba5e1318dbb2ddafa | /hello_world/django/benckmark/wsgi.py | 39449f5a6e5611a43eef292c969e3ffe19f098ea | [] | no_license | TakesxiSximada/benchmarks | 42ce5466c813e45db78f87ca391806fbb845a16c | 9cd2fc732ed006fd3554e01b1fc71bfcb3ada312 | refs/heads/master | 2021-01-15T23:02:14.063157 | 2015-05-30T18:52:08 | 2015-05-30T18:52:08 | 36,551,678 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | """
WSGI config for benckmark project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/dev/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "benckmark.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| [
"takesxi.sximada@gmail.com"
] | takesxi.sximada@gmail.com |
d09894a4407767279783f2300c505fdeee58d511 | 0cbc02dd7d1efbe61de04dcf1c6eccb6496bf074 | /month02/AID1912/day07/baidu.py | d26b8dd65faba071eedd46919db85150e638f315 | [] | no_license | fsym-fs/Python_AID | 0b1755c15e20b214940041e81bedb2d5ec99e3f9 | f806bb02cdb1670cfbea6e57846abddf3972b73b | refs/heads/master | 2021-03-20T06:57:45.441245 | 2020-05-27T14:13:45 | 2020-05-27T14:13:45 | 247,187,064 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 858 | py | import requests
import base64
import json
# Maps a Chinese category label ("plant" / "animal" / "other") to the
# Baidu image-classify REST endpoint path that recognizes that category.
ai_list = {'植物':'/v1/plant','动物':'/v1/animal','其他':'/v2/advanced_general'}
def baidu(type,path):
    """Classify the image at *path* with Baidu's image-recognition API.

    NOTE: the parameter name ``type`` shadows the builtin of the same name;
    it is kept for backward compatibility with existing callers.

    Args:
        type: one of the keys of ``ai_list`` (a Chinese category label);
            any other value makes the function return None immediately.
        path: filesystem path of the image file to classify.

    Returns:
        The parsed ``result`` field of the API response, the string
        'Error' when that field is absent, or None for an unknown *type*.
    """
    if type in ai_list:
        """
        https://aip.baidubce.com/rest/2.0/image-classify/v1/animal
        """
        # NOTE(review): the access_token is hard-coded and Baidu tokens
        # expire (the ".2592000." segment suggests a 30-day lifetime) —
        # confirm it is still valid / consider fetching it dynamically.
        url="https://aip.baidubce.com/rest/2.0/image-classify%s?access_token=24.c36ae190ea9865133bbc1bf1e2d921d4.2592000.1577462210.282335-17874022"%(ai_list[type])
    else:
        return None
    header = {
        'Content-Type':'application/x-www-form-urlencoded'
    }
    data = {}
    # The API expects the image as a base64-encoded string in the form body.
    with open(path,'rb') as f:
        image=base64.b64encode(f.read())
    data['image'] = str(image,'utf-8')
    res = requests.post(url=url,data=data,headers=header).text
    # 'result' holds the recognition candidates; fall back to 'Error'
    # when the response reports a failure instead.
    return json.loads(res).get('result','Error')
if __name__ == '__main__':
    # Quick manual check: classify a sample image as an animal.
    print(baidu('动物','kl.jpg'))
| [
"1085414029@qq.com"
] | 1085414029@qq.com |
c7d7a4883c4ab514cadf97faae9ff73459bd33ab | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/139/usersdata/165/59838/submittedfiles/diagonaldominante.py | 075feccac89be566766f1e4839a07d0f21253793 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 545 | py | # -*- coding: utf-8 -*-
import numpy as np
def diagonaldominante(a):
    """Return True if the square matrix *a* is (row) diagonally dominant.

    A matrix is diagonally dominant when, for every row i,
    ``|a[i, i]| >= sum(|a[i, j]| for j != i)``.

    Bug fixes versus the original: ``soma`` was never initialized
    (NameError on first use), it was never reset between rows, absolute
    values were not taken, and the final comparison was inverted
    (it returned False exactly for the rows that *are* dominant).

    Args:
        a: a 2-D square numpy array.

    Returns:
        bool: True when every row satisfies the dominance condition.
    """
    for i in range(0,a.shape[0],1):
        # Sum of absolute values of the off-diagonal entries of row i.
        soma=0
        for j in range(0,a.shape[1],1):
            soma=soma+abs(a[i,j])
        soma=soma-abs(a[i,i])
        # Row i fails dominance when the off-diagonal sum exceeds |a[i,i]|.
        if soma>abs(a[i,i]):
            return False
    return True
# Read the matrix order, then fill an n x n matrix element by element
# from user input (prompts are in Portuguese: "enter the matrix order" /
# "enter a number").
n=int(input('digite a ordem da matriz:'))
a=np.zeros((n,n))
for i in range(0,a.shape[0],1):
    for j in range(0,a.shape[1],1):
        a[i,j]=int(input('digite um numero:'))
# Print SIM ("yes") when the matrix is diagonally dominant, NAO otherwise.
if diagonaldominante(a)==True:
    print('SIM')
else:
    print('NAO')
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
28444482d26ea8121cb836029ad3e93d17809a1f | 1b48b3980abbe11691310a7f35efef62bc0ae831 | /Qt/QState/rogue.py | 54a6897f40b4217183ae0b7b120dd1c64d4ca3cb | [] | no_license | FXTD-ODYSSEY/MayaScript | 7619b1ebbd664988a553167262c082cd01ab80d5 | 095d6587d6620469e0f1803d59a506682714da17 | refs/heads/master | 2022-11-05T08:37:16.417181 | 2022-10-31T11:50:26 | 2022-10-31T11:50:26 | 224,664,871 | 45 | 11 | null | null | null | null | UTF-8 | Python | false | false | 7,836 | py |
#############################################################################
##
## Copyright (C) 2010 velociraptor Genjix <aphidia@hotmail.com>
## Copyright (C) 2016 The Qt Company Ltd.
## Contact: http://www.qt.io/licensing/
##
## This file is part of the Qt for Python examples of the Qt Toolkit.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of The Qt Company Ltd nor the names of its
## contributors may be used to endorse or promote products derived
## from this software without specific prior written permission.
##
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
## $QT_END_LICENSE$
##
#############################################################################
import os
import sys
# Walk up from this file's directory until a folder containing ".git" is
# found (the repository root); returns None if the filesystem root is
# reached first. Written as a self-applying recursive lambda.
repo = (lambda f:lambda p=__file__:f(f,p))(lambda f,p: p if [d for d in os.listdir(p if os.path.isdir(p) else os.path.dirname(p)) if d == '.git'] else None if os.path.dirname(p) == p else f(f,os.path.dirname(p)))()
# Make the vendored Qt.py shim importable before the Qt wildcard imports below.
MODULE = os.path.join(repo,'_vendor','Qt')
sys.path.insert(0,MODULE) if MODULE not in sys.path else None
from Qt.QtGui import *
from Qt.QtCore import *
from Qt.QtWidgets import *
# class MovementTransition(QEventTransition):
class MovementTransition(QEventTransition):
    """Key-press transition for the numpad movement keys (2/4/6/8).

    Fires only for those four keys and moves the player in the matching
    direction when taken.
    """
    def __init__(self, window):
        super(MovementTransition, self).__init__(window, QEvent.KeyPress)
        self.window = window
    def eventTest(self, event):
        # The state machine wraps the original key event; unwrap before
        # inspecting it, and reject everything else early.
        if event.type() != QEvent.StateMachineWrapped:
            return False
        wrapped = event.event()
        if wrapped.type() != QEvent.KeyPress:
            return False
        return wrapped.key() in (Qt.Key_2, Qt.Key_4, Qt.Key_6, Qt.Key_8)
    def onTransition(self, event):
        # Dispatch the pressed key to the corresponding movement direction.
        pressed = event.event().key()
        moves = {
            Qt.Key_4: self.window.Left,
            Qt.Key_8: self.window.Up,
            Qt.Key_6: self.window.Right,
            Qt.Key_2: self.window.Down,
        }
        if pressed in moves:
            self.window.movePlayer(moves[pressed])
class Custom(QState):
    """State that echoes the owning window's status line on every entry."""
    def __init__(self, parent, mw):
        super(Custom, self).__init__(parent)
        self._main_window = mw
    def onEntry(self, e):
        # Print the current status each time the machine enters this state.
        print(self._main_window.status)
class MainWindow(QMainWindow):
    """Minimal rogue-like demo: a hand-painted ASCII map whose input
    handling (movement keys, quit confirmation) is modelled as a
    QStateMachine."""
    def __init__(self):
        super(MainWindow, self).__init__()
        # Player position in map cells and the map dimensions in cells.
        self.pX = 5
        self.pY = 5
        self.width = 35
        self.height = 20
        # Backing store for the `status` Property shown above the map.
        self.statusStr = ''
        database = QFontDatabase()
        font = QFont()
        # Prefer a fixed-pitch font so the ASCII map cells line up.
        if 'Monospace' in database.families():
            font = QFont('Monospace', 12)
        else:
            for family in database.families():
                if database.isFixedPitch(family):
                    font = QFont(family, 12)
        self.setFont(font)
        self.setupMap()
        self.buildMachine()
        self.show()
    def setupMap(self):
        """Generate the map: solid border walls plus ~1-in-40 random inner walls."""
        self.map = []
        # Seed Qt's RNG from the seconds elapsed since midnight.
        qsrand(QTime(0, 0, 0).secsTo(QTime.currentTime()))
        for x in range(self.width):
            column = []
            for y in range(self.height):
                if x == 0 or x == self.width - 1 or y == 0 or \
                    y == self.height - 1 or qrand() % 40 == 0:
                    column.append('#')
                else:
                    column.append('.')
            self.map.append(column)
    def buildMachine(self):
        """Build the state machine: inputState <-> quitState -> finalState."""
        machine = QStateMachine(self)
        inputState = Custom(machine, self)
        # Direct assignment goes through the Python Property setter and
        # does update the status line.
        self.status = 'hello!'
        # NOTE(review): the original author observed that assignProperty
        # does NOT update the Python Property here — confirm against the
        # Qt binding in use.
        inputState.assignProperty(self, 'status', 'Move the rogue with 2, 4, 6, and 8')
        # Movement keys keep the machine in inputState (no target state).
        transition = MovementTransition(self)
        inputState.addTransition(transition)
        quitState = QState(machine)
        quitState.assignProperty(self, 'status', 'Really quit(y/n)?')
        # 'y' in the quit prompt finishes the machine (and thus the app).
        yesTransition = QKeyEventTransition(self, QEvent.KeyPress, Qt.Key_Y)
        self.finalState = QFinalState(machine)
        yesTransition.setTargetState(self.finalState)
        quitState.addTransition(yesTransition)
        # 'n' cancels the quit prompt and returns to normal input.
        noTransition = QKeyEventTransition(self, QEvent.KeyPress, Qt.Key_N)
        noTransition.setTargetState(inputState)
        quitState.addTransition(noTransition)
        # 'q' from normal input opens the quit prompt.
        quitTransition = QKeyEventTransition(self, QEvent.KeyPress, Qt.Key_Q)
        quitTransition.setTargetState(quitState)
        inputState.addTransition(quitTransition)
        machine.setInitialState(inputState)
        machine.finished.connect(QApplication.quit)
        machine.start()
    def sizeHint(self):
        """Size the window to the map in character cells plus a status row."""
        metrics = QFontMetrics(self.font())
        return QSize(metrics.width('X') * self.width, metrics.height() * (self.height + 1))
    def paintEvent(self, event):
        """Repaint everything: status line, map cells, then the player '@'."""
        metrics = QFontMetrics(self.font())
        painter = QPainter(self)
        fontHeight = metrics.height()
        fontWidth = metrics.width('X')
        painter.fillRect(self.rect(), Qt.black)
        painter.setPen(Qt.white)
        yPos = fontHeight
        painter.drawText(QPoint(0, yPos), self.status)
        for y in range(self.height):
            yPos += fontHeight
            xPos = 0
            for x in range(self.width):
                # Skip the player's cell; '@' is drawn separately below.
                if y == self.pY and x == self.pX:
                    xPos += fontWidth
                    continue
                painter.drawText(QPoint(xPos, yPos), self.map[x][y])
                xPos += fontWidth
        painter.drawText(QPoint(self.pX * fontWidth, (self.pY + 2) * fontHeight), '@')
    def movePlayer(self, direction):
        """Move the player one cell in *direction* unless blocked by a wall."""
        if direction == self.Left:
            if self.map[self.pX - 1][self.pY] != '#':
                self.pX -= 1
        elif direction == self.Right:
            if self.map[self.pX + 1][self.pY] != '#':
                self.pX += 1
        elif direction == self.Up:
            if self.map[self.pX][self.pY - 1] != '#':
                self.pY -= 1
        elif direction == self.Down:
            if self.map[self.pX][self.pY + 1] != '#':
                self.pY += 1
        self.repaint()
    def getStatus(self):
        """Getter for the `status` Property."""
        return self.statusStr
    def setStatus(self, status):
        """Setter for the `status` Property; repaints so the new text shows."""
        self.statusStr = status
        self.repaint()
    # Qt Property so assignProperty/animation machinery can drive the status.
    status = Property(str, getStatus, setStatus)
    # Movement direction constants used by MovementTransition / movePlayer.
    Up = 0
    Down = 1
    Left = 2
    Right = 3
    # NOTE(review): Width/Height duplicate the instance attributes set in
    # __init__ and appear unused — confirm before removing.
    Width = 35
    Height = 20
if __name__ == '__main__':
    # Standard Qt bootstrap: create the application, construct the window
    # (which shows itself in __init__), and run the event loop until the
    # state machine reaches its final state.
    # The redundant `import sys` was removed; sys is imported at module top.
    app = QApplication(sys.argv)
    mainWin = MainWindow()
    sys.exit(app.exec_())
| [
"timmyliang@tencent.com"
] | timmyliang@tencent.com |
38ae20856df89827f8a00aa25a5e08cb9eb6ceea | 04975a41eb459f1528dcbdcb1143a3cb535aa620 | /Array_easy/leetcode_1480.py | e74dd07db830ae28ab16b1a7251a89f541404fe5 | [] | no_license | RickLee910/Leetcode_easy | 2a50d632379826979a985e1b9950d4cf6bbd8b18 | c2687daf334f96a908737067bb915b8b072d0d56 | refs/heads/master | 2023-01-29T11:09:26.701243 | 2020-12-02T04:36:14 | 2020-12-02T04:36:14 | 294,952,436 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 165 | py | class Solution:
def runningSum(self, nums):
temp = []
for i in range(len(nums)):
temp.append(sum(nums[0:i + 1]))
return temp
| [
"13554543910@163.com"
] | 13554543910@163.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.