blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d0a5805d8de348ebd9b1de3b91221773c58040fc | 1d60c5a7b8ce6277bff514e376f79848f706344c | /Machine Learning Scientist with Python/19. Image Processing with Keras in Python/03. Going Deeper/04. Write your own pooling operation.py | af1617492f97a83b0f689958bdbfb890957a90cf | [] | no_license | DidiMilikina/DataCamp | 338c6e6d3b4f5b6c541c1aba155a36e9ee24949d | 3bf2cf3c1430190a7f8e54efda7d50a5fd66f244 | refs/heads/master | 2020-12-15T13:16:54.178967 | 2020-05-06T17:30:54 | 2020-05-06T17:30:54 | 235,113,616 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,188 | py | '''
Write your own pooling operation
As we have seen before, CNNs can have a lot of parameters. Pooling layers are often added between the convolutional layers of a neural network to summarize their outputs in a condensed manner, and reduce the number of parameters in the next layer in the network. This can help us if we want to train the network more rapidly, or if we don't have enough data to learn a very large number of parameters.
A pooling layer can be described as a particular kind of convolution. For every window in the input it finds the maximal pixel value and passes only this pixel through. In this exercise, you will write your own max pooling operation, based on the code that you previously used to write a two-dimensional convolution operation.
Instructions
100 XP
Index into the input array (im) and select the right window.
Find the maximum in this window.
Allocate this into the right entry in the output array (result).
'''
SOLUTION
# Result placeholder
result = np.zeros((im.shape[0]//2, im.shape[1]//2))
# Pooling operation
for ii in range(result.shape[0]):
for jj in range(result.shape[1]):
result[ii, jj] = np.max(im[ii*2:ii*2+2, jj*2:jj*2+2]) | [
"didimilikina8@gmail.com"
] | didimilikina8@gmail.com |
09ea2d36a061bd2ef4ac973b25a693b6625b6703 | df0062217e45a1fe9d9af83ba1768aab385d2c28 | /proboscis/decorators.py | 54f5264568242dca31520f87584c5c0fcbfd74c5 | [
"Apache-2.0"
] | permissive | rassilon/python-proboscis | 678b20a149a22b036d2fb3044a53a9a1a02cedc7 | 214c1c317c6575ecc1b3ccb2dc60303d57fbc417 | refs/heads/master | 2020-12-24T16:58:46.572787 | 2012-07-09T22:37:32 | 2012-07-09T22:37:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,536 | py | # Copyright (c) 2011 Rackspace
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Decorators useful to the tests."""
from functools import wraps
from proboscis.asserts import assert_raises_instance
from proboscis import compatability
from proboscis.core import TestRegistry
DEFAULT_REGISTRY = TestRegistry()
def expect_exception(exception_type):
"""Decorates a test method to show it expects an exception to be raised."""
def return_method(method):
@wraps(method)
def new_method(*args, **kwargs):
assert_raises_instance(exception_type, method, *args, **kwargs)
return new_method
return return_method
class TimeoutError(RuntimeError):
"""Thrown when a method has exceeded the time allowed."""
pass
def time_out(time):
"""Raises TimeoutError if the decorated method does not finish in time."""
if not compatability.supports_time_out():
raise ImportError("time_out not supported for this version of Python.")
import signal
def cb_timeout(signum, frame):
raise TimeoutError("Time out after waiting " + str(time) + " seconds.")
def return_method(func):
"""Turns function into decorated function."""
@wraps(func)
def new_method(*kargs, **kwargs):
previous_handler = signal.signal(signal.SIGALRM, cb_timeout)
try:
signal.alarm(time)
return func(*kargs, **kwargs)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, previous_handler)
return new_method
return return_method
def register(**kwargs):
"""Registers a test in proboscis's default registry.
:param home: The target class or function.
This also allows all of the parameters used by the @test decorator.
This function works differently than a decorator as it allows the class or
function which is being registered to appear in the same call as all of the
options.
Its designed to make it easier to register class or functions with
Proboscis after they're defined.
"""
DEFAULT_REGISTRY.register(**kwargs)
def test(home=None, **kwargs):
"""Decorates a test class or function to cause Proboscis to run it.
The behavior differs depending the target:
- If put on a stand-alone function, the function will run by itself.
- If put on a class inheriting unittest.TestCase, then the class will
run just like a normal unittest class by using the method names and
instantiate a new instance of the class for each test method.
- If the class does not inherit from unittest.TestCase, the class will
be instantiated once and this instance will be passed to each method
decorated with @test (this increases encapsulation over using class
fields as the instance can not be accessed outside of its methods).
Note that due to how decorators work its impossible to know if a
function is or is not part of a class; thus if a class method is
decorated with test but its class is not then
ProboscisTestMethodNotDecorated will be raised.
:param groups: A list of strings representing the groups this test method
or class belongs to. By default this is an empty list.
:param depends_on: A list of test functions or classes which must run
before this test. By default this is an empty list.
:param depends_on_groups: A list of strings each naming a group that must
run before this test. By default this is an empty
list.
:param enabled: By default, true. If set to false this test will not run.
:param always_run: If true this test will run even if the tests listed in
depends_on or depends_on_groups have failed.
"""
if home:
return DEFAULT_REGISTRY.register(home, **kwargs)
else:
def cb_method(home_2):
return DEFAULT_REGISTRY.register(home_2, **kwargs)
return cb_method
def before_class(home=None, **kwargs):
"""Like @test but indicates this should run before other class methods.
All of the arguments sent to @test work with this decorator as well.
"""
kwargs.update({'run_before_class':True})
return test(home=home, **kwargs)
def after_class(home=None, **kwargs):
"""Like @test but indicates this should run after other class methods.
This will run even if methods inside the class fail.
All of the arguments sent to @test work with this decorator as well.
"""
kwargs.update({'run_after_class':True})
return test(home=home, **kwargs)
def factory(func=None, **kwargs):
"""Decorates a function which returns new instances of Test classes."""
if func:
return DEFAULT_REGISTRY.register_factory(func)
else:
raise ValueError("Arguments not supported on factories.")
| [
"tim.simpson@rackspace.com"
] | tim.simpson@rackspace.com |
abe20bfb2a6b3bcbe0ba10177cc733b42e8086ec | b1303152c3977a22ff9a0192c0c32310e65a6d77 | /python/109.convert-sorted-list-to-binary-search-tree.py | e02cf628fa34d2da85972e66a21ada048e2fdcaf | [
"Apache-2.0"
] | permissive | stavanmehta/leetcode | 1b8da1c2bfacaa76ddfb96b8dbce03bf08c54c27 | 1224e43ce29430c840e65daae3b343182e24709c | refs/heads/master | 2021-07-15T16:02:16.107962 | 2021-06-24T05:39:14 | 2021-06-24T05:39:14 | 201,658,706 | 0 | 0 | Apache-2.0 | 2021-06-24T05:39:15 | 2019-08-10T16:59:32 | Java | UTF-8 | Python | false | false | 382 | py | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def sortedListToBST(self, head: ListNode) -> TreeNode:
| [
"noreply@github.com"
] | stavanmehta.noreply@github.com |
0b125c0bcb4a80ca0f836c735b815a89f509b757 | 64cd09628f599fe18bf38528309349f7ac0df71e | /Introduction/10_Introduction_tensorflow/8_Tensorflow_functions/neg or -.py | 59e33f4ff044010ae0dcca84e5ff111aaca3e10b | [] | no_license | JunyoungJang/Python | 958c057b2fd37c03876d3cf566ee27ee637bb020 | 76d4cd441deff8061e10608e0848360bc4f34490 | refs/heads/master | 2021-01-19T21:54:42.208469 | 2020-02-14T09:54:17 | 2020-02-14T09:54:17 | 83,768,220 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | import tensorflow as tf
X = tf.constant([[2., 2.], [-3., 3.]])
# tf.neg(X) is equivalent to - X
a = tf.neg(X)
b = - X
with tf.Session() as sess:
print sess.run(a)
print sess.run(b)
| [
"lakino@yonsei.ac.kr"
] | lakino@yonsei.ac.kr |
aa7a142d7fefe454e13433d58de97643501b3332 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/gigasecond/bf994fcc2d8b4c1c9fdea7adaf87f2e0.py | 5dcf780303f3a3dbbff6c18d0077b59ff3fd94f6 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 216 | py | from datetime import datetime
from datetime import timedelta
def add_gigasecond(initial_date):
gigasecond = 10**9
return initial_date + timedelta(seconds = gigasecond)
print add_gigasecond(datetime(2011, 4, 25))
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
a3871737df16daae3fef7ab62128c7f893347cfb | e75a890b39f046b2a44e3433acabc5dd12be7dbd | /leecode/9.回文数.py | 30763f64ebc4fad122eff3357105915a75a8ac0e | [] | no_license | zzf531/leetcode | 53c82ad96fef66ab666b658c1a60b9f81646c72a | cdb22e44c9fac2bc06a840bf7433aeb9be9ae2b2 | refs/heads/master | 2020-08-03T03:47:16.895530 | 2020-03-30T02:48:20 | 2020-03-30T02:48:20 | 211,615,639 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 299 | py | class Solution:
def isPalindrome(self, x: int) -> bool:
if x < 0:return False
s = str(x)
s2 = s[::-1]
i = int(s2)
if i == x and x == 0:
return True
else:
return False
a = Solution()
print(a.isPalindrome(0))
| [
"2315519934@qq.com"
] | 2315519934@qq.com |
f53ada130a5b5651bd8b1089ba2582f5cb6eb12b | 5651e0d643e13d9f309e5ce5272a393d570e451f | /sla_cli/src/db/schema.py | d4561cfb06839df25e468b777c1b7f5a973a02ba | [
"MIT",
"CC-BY-4.0"
] | permissive | rdoreilly/SLA-CLI | a722ba5cf435399215c0368cf26a44a8f5c16957 | c92ca8a6e57eb51bf9c9433013ce16d443f8d152 | refs/heads/main | 2023-04-03T23:26:17.741218 | 2021-04-15T11:07:42 | 2021-04-15T11:07:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,381 | py | """
Author: David Walshe
Date: 07 April 2021
"""
import logging
from typing import Dict, List, Union
import json
import attr
from attr.validators import instance_of
from colorama import Fore
from sla_cli.src.common.path import Path
logger = logging.getLogger(__name__)
@attr.s
class Schema:
pass
@attr.s
class Info(Schema):
"""
Maps the meta information of a dataset object.
"""
availability: str = attr.ib(validator=instance_of(str))
capture_method: str = attr.ib(validator=instance_of(str))
size: float = attr.ib(validator=instance_of(float), converter=lambda size: round(float(size), 2))
references: Union[List[str]] = attr.ib(validator=instance_of(list))
download: Union[List[str], None] = attr.ib(default=[""], converter=lambda config: [] if config is None else config)
def __getitem__(self, item):
"""Allows for [] indexing."""
return self.__getattribute__(item)
def __str__(self):
indent = "\n - "
return f" Availability: {Fore.LIGHTGREEN_EX if self.availability.lower() == 'public' else Fore.LIGHTRED_EX}{self.availability}{Fore.RESET}\n" \
f" Capture method: {Fore.LIGHTCYAN_EX if self.capture_method.lower() == 'dermoscopy' else Fore.LIGHTYELLOW_EX}{self.capture_method}{Fore.RESET}\n" \
f" Size: {'--' if self.size < 0 else round(self.size, 2)} MB\n" \
f" References:\n" \
f" - {indent.join(self.references)}\n" \
f" Data source URL:\n" \
f" - {indent.join(self.download)}"
@attr.s
class Dataset(Schema):
"""
Maps to an individual dataset.
"""
info: Info = attr.ib(validator=instance_of(Info), converter=lambda config: Info(**config))
labels: Dict[str, int] = attr.ib(validator=instance_of(dict))
@attr.s
class Datasets(Schema):
"""
Maps to the available dataset statistics in the db file.
"""
atlas_of_dermoscopy: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
bcn_20000: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
bcn_2020_challenge: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
brisbane_isic_challenge_2020: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
dermofit: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
dermoscopedia_cc_by: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
dermis: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
dermquest: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
ham10000: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
isic_2020_challenge_mskcc_contribution: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
isic_2020_vienna_part_1: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
isic_2020_vienna_part_2: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
jid_editorial_images_2018: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
mclass_d: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
mclass_nd: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
mednode: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
msk_1: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
msk_2: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
msk_3: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
msk_4: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
msk_5: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
pad_ufes_20: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
ph2: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
sonic: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
sydney_mia_smdc_2020_isic_challenge_contribution: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
uda_1: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
uda_2: Dataset = attr.ib(validator=instance_of(Dataset), converter=lambda config: Dataset(**config))
@property
def as_dict(self):
"""Returns all scalar and collect objects for this class that are Dataset objects."""
return {key: value for key, value in self.__dict__.items() if isinstance(value, Dataset)}
@property
def labels(self):
"""Retrieves all the label entries for dataset objects."""
return {key: value.labels for key, value in self.as_dict.items()}
@property
def info(self):
"""Retrieves all the info entries for the dataset objects."""
return {key: value.info for key, value in self.as_dict.items()}
@property
def names(self):
"""Returns a list of all dataset names."""
return list(self.as_dict.keys())
def __getitem__(self, item) -> Dataset:
"""Allows [] indexing of attributes."""
return self.__getattribute__(item)
@attr.s
class DB(Schema):
"""
Maps to the db.json file.
"""
datasets: Datasets = attr.ib(validator=instance_of(Datasets), converter=lambda config: Datasets(**config))
abbrev: Dict[str, str] = attr.ib(validator=instance_of(dict))
@staticmethod
def get_db():
"""
Factory method to return an instance of the DB object.
:return: A instance of DB.
"""
with open(Path.db()) as fh:
db = json.load(fh)
return DB(**db)
| [
"david.walshe93@gmail.com"
] | david.walshe93@gmail.com |
39196050d48bc0215006c07b5fad2ebb8ef47221 | 59dd5ca4d22fc8b377b89977d68fa3c812e37d7b | /tests/case07_change_type/models_pre.py | 46645ed4624a371229a9d57aa0175129349d5946 | [] | no_license | buriy/deseb2 | 24b42996f3c503a87ba7f5d8f9abcfa09a293a5d | 26d5934ca1481a54a3e901b75f693869dcd0cb64 | refs/heads/master | 2021-01-01T15:30:48.017759 | 2008-10-13T06:13:14 | 2008-10-13T06:13:14 | 223,088 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 933 | py | from django.db import models
import deseb
class Poll(models.Model):
"""this model originally had fields named pub_date and the_author. you can use either a str
or a tuple for the aka value. (tuples are used if you have changed its name more than once)"""
question = models.CharField(max_length=200, default='test')
pub_date = models.DateTimeField('date published', aka=('pub_date', 'publish_date'))
the_author = models.CharField(max_length=200, aka='the_author')
if deseb.version == 'trunk':
rank = models.FloatField(default=1)
else:
rank = models.FloatField(max_digits=5, decimal_places=2, default=3)
def __str__(self):
return self.question
class Choice(models.Model):
poll = models.ForeignKey(Poll)
choice = models.CharField(max_length=200)
votes = models.IntegerField(aka='votes')
def __str__(self):
return self.choice
| [
"burchik@gmail.com"
] | burchik@gmail.com |
b94c10ffe1e2cebe6c2841f11409493c979fc88d | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03806/s842087595.py | d6c3b9ca4a01b491de47129a0e62c223068e0ba0 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,034 | py | #!/usr/bin/env python3
#ABC54 D
import sys
import math
import bisect
sys.setrecursionlimit(1000000000)
from heapq import heappush, heappop
from collections import defaultdict
from itertools import accumulate
from collections import Counter
from collections import deque
from operator import itemgetter
from itertools import permutations
mod = 10**9 + 7
n,ma,mb = map(int,input().split())
x = [list(map(int,input().split())) for _ in range(n)]
sa = 0
sb = 0
for a,b,c in x:
sa += a
sb += b
dp = [[float('inf')]*(sb+1) for _ in range(sa+1)]
dp[0][0] = 0
for a,b,c in x:
y = [[True]*(sb+1) for _ in range(sa+1)]
for i in range(sa+1-a):
for j in range(sb+1-b):
if y[i][j]:
if dp[i+a][j+b] > dp[i][j] + c:
dp[i+a][j+b] = dp[i][j] + c
y[i+a][j+b] = False
ans = float('inf')
for i in range(1,sa+1):
for j in range(1,sb+1):
if i*mb == j*ma:
ans = min(ans,dp[i][j])
if ans == float('inf'):
print(-1)
else:
print(ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
0f208f06bcdb8fb441e0b83c7cf87c10e7b1823e | 000a4b227d970cdc6c8db192f4437698cb782721 | /python/helpers/typeshed/stdlib/importlib/__init__.pyi | 0e99786775b0b6fbcc2709c7e75cbd15358973c7 | [
"MIT",
"Apache-2.0"
] | permissive | trinhanhngoc/intellij-community | 2eb2f66a2a3a9456e7a0c5e7be1eaba03c38815d | 1d4a962cfda308a73e0a7ef75186aaa4b15d1e17 | refs/heads/master | 2022-11-03T21:50:47.859675 | 2022-10-19T16:39:57 | 2022-10-19T23:25:35 | 205,765,945 | 1 | 0 | Apache-2.0 | 2019-09-02T02:55:15 | 2019-09-02T02:55:15 | null | UTF-8 | Python | false | false | 791 | pyi | from importlib.abc import Loader
from types import ModuleType
from typing import Mapping, Sequence
__all__ = ["__import__", "import_module", "invalidate_caches", "reload"]
# Signature of `builtins.__import__` should be kept identical to `importlib.__import__`
def __import__(
name: str,
globals: Mapping[str, object] | None = ...,
locals: Mapping[str, object] | None = ...,
fromlist: Sequence[str] = ...,
level: int = ...,
) -> ModuleType: ...
# `importlib.import_module` return type should be kept the same as `builtins.__import__`
def import_module(name: str, package: str | None = ...) -> ModuleType: ...
def find_loader(name: str, path: str | None = ...) -> Loader | None: ...
def invalidate_caches() -> None: ...
def reload(module: ModuleType) -> ModuleType: ...
| [
"intellij-monorepo-bot-no-reply@jetbrains.com"
] | intellij-monorepo-bot-no-reply@jetbrains.com |
12571535c2293ff73d15e6d288a9fb979737dd66 | 2d40a56ca2e9f8a37018eba1edfe3f93f7bd2176 | /app/__init__.py | 5ef65987471aee2297711a150741be280508af5c | [
"MIT"
] | permissive | Mariga123/BLOGS | 27f119d12c50a1b3e39e62f091b2dec362a63f08 | 5578540f5fc9ec3aed73d7cca869117d2df9a298 | refs/heads/master | 2023-01-31T01:04:12.795202 | 2020-12-15T23:04:57 | 2020-12-15T23:04:57 | 320,508,729 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,061 | py | from flask import Flask
from config import config_options
from flask_mail import Mail
from flask_login import LoginManager
from flask_bootstrap import Bootstrap
from flask_sqlalchemy import SQLAlchemy
from flask_uploads import IMAGES, UploadSet, configure_uploads
db = SQLAlchemy()
mail = Mail()
bootstap = Bootstrap()
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'
photos = UploadSet('photos', IMAGES)
def create_app(config_name):
app = Flask(__name__)
app.config.from_object(config_options[config_name])
from .auth import auth as authentication_blueprint
from .main import main as main_blueprint
# app.add_url_rule('/', endpoint='main.index')
# app.register_blueprint(auth_blueprint, url_prefix='/authenticate')
app.register_blueprint(authentication_blueprint)
app.register_blueprint(main_blueprint)
login_manager.init_app(app)
db.init_app(app)
bootstap.init_app(app)
configure_uploads(app, photos)
mail.init_app(app)
return app
| [
"johnmariga8@gmail.com"
] | johnmariga8@gmail.com |
c100e4099c1b15f54c023a6ded6ae3dbe74cd1e2 | 10f1bbac126bb187febc630ab13b09ac6d9270cb | /Examples/swordfish.py | a86c83506091e74070d7a8fc63061f23c89010ff | [] | no_license | akshirapov/automate-the-boring-stuff | 481827efd8f53117d73bc2f6b846b49736bb9d46 | fb36141e249f8c5db8e1c1c40856d5f8134606cc | refs/heads/master | 2022-12-17T10:55:33.412077 | 2020-01-10T16:02:13 | 2020-01-10T16:02:13 | 187,468,744 | 0 | 0 | null | 2022-12-08T05:15:44 | 2019-05-19T11:37:46 | Python | UTF-8 | Python | false | false | 253 | py | while True:
print('Who are you?')
name = input()
if name != 'Joe':
continue
print('Hello, Joe. What is the password? (It is a fish)')
password = input()
if password == 'swordfish':
break
print('Access granted.')
| [
"cccp2006_06@mail.ru"
] | cccp2006_06@mail.ru |
b72ab49459fa091603bd0dbb3eb1c0427da0a8b8 | 2448c41b6914cce852a6b0624298936029d62d0f | /apps/tests/accounts/token_tests.py | 0e514d08adb03afa128e5935047ed95f2d215c77 | [] | no_license | navill/ut_project | ade4f7ddede3096ee22a6f8f1d7da100bf73eacf | ef639e79bcdd59bd7b7d68edd185d88bfc82d4d3 | refs/heads/master | 2023-04-20T02:50:43.337465 | 2021-05-21T00:17:22 | 2021-05-21T00:17:22 | 314,426,915 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,703 | py | import time
import pytest
from rest_framework.reverse import reverse
from rest_framework_simplejwt.token_blacklist.models import *
from rest_framework_simplejwt.utils import datetime_from_epoch
from accounts.api.authentications import CustomJWTTokenUserAuthentication, CustomRefreshToken
from accounts.models import *
@pytest.mark.django_db
def test_custom_refresh_token():
user = BaseUser.objects.get(id=2)
# expired time
token = CustomRefreshToken.for_user(user)
assert user.token_expired == token.access_token['exp']
assert BlacklistedToken.objects.all().exists() is False
outstanding_token = OutstandingToken.objects.first()
assert outstanding_token.token == str(token)
assert outstanding_token.jti == token['jti']
assert outstanding_token.expires_at == datetime_from_epoch(token['exp'])
token.blacklist()
black_token = BlacklistedToken.objects.get(token_id=outstanding_token.id)
assert black_token
@pytest.mark.django_db
def test_token_for_user_with_error(doctor_with_group):
with pytest.raises(Exception):
CustomRefreshToken.for_user(doctor_with_group.user, raise_error=True)
# CustomRefreshToken.for_user() 중간에 에러가 발생할 경우 user.token_expired=<epoch_time> 및 OutstandingToken은 생성되면 안됨
assert doctor_with_group.user.token_expired == 0
assert OutstandingToken.objects.all().exists() is False
CustomRefreshToken.for_user(doctor_with_group.user)
assert doctor_with_group.user.token_expired != 0
@pytest.mark.django_db
def test_authenticate_jwt_token_user(rf):
doctor = Doctor.objects.first()
token = CustomRefreshToken.for_user(doctor.user)
access_token = token.access_token
url = reverse('token-login')
request = rf.post(url, HTTP_AUTHORIZATION=f'Bearer {str(access_token)}')
authentication = CustomJWTTokenUserAuthentication()
auth_user, validated_token = authentication.authenticate(request)
assert auth_user == doctor.user
assert token['token_type'] == 'refresh'
assert access_token['token_type'] == 'access'
assert access_token['jti'] == validated_token['jti']
@pytest.mark.django_db
def test_compare_user_token_expired_with_accesstoken_expired(get_token_from_doctor):
doctor = Doctor.objects.first()
token = CustomRefreshToken.for_user(doctor.user)
access_token = token.access_token
# 토큰 타입 검사
assert get_token_from_doctor['token_type'] == 'refresh'
assert access_token['token_type'] == 'access'
# user 모델에 등록된 토큰 만료 시간과 발급된 토큰(access_token)의 만료 시간이 동일한지 확인
assert access_token['exp'] == doctor.user.token_expired
| [
"blue_jihoon@naver.com"
] | blue_jihoon@naver.com |
b6489a92789fa5c732d255eb213c6d5f6a9e3dd2 | 16ca50defdb822904aa310552ea614db3c50a7b8 | /src/posts/views.py | 1f3b72094a75a2a6ec9e4353cd07de97c6a9dd42 | [] | no_license | Elsaeed97/django-cbv | 5cd199ae5df7a94f9c93efa6e06d32292894f154 | 464f0a0da73b114e8c06ded4ee3c1e6fe9ece45e | refs/heads/master | 2020-07-15T09:21:53.545096 | 2019-09-02T20:31:05 | 2019-09-02T20:31:05 | 205,531,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 793 | py | from django.shortcuts import render
from .models import Post
from django.views.generic import TemplateView, DetailView, CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy
# Create your views here.
class HomePage(TemplateView):
template_name = "home.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['posts'] = Post.objects.all()
return context
class PostsDetail(DeleteView):
context_object_name = 'post_details'
model = Post
template_name = 'posts/post_detail.html'
class PostCreate(CreateView):
fields = ('title', 'content','author')
model = Post
class PostUpdate(UpdateView):
fields = ('title', 'content')
model = Post
class PostDelete(DeleteView):
model = Post
success_url = reverse_lazy('home')
| [
"elsaeedahmed97@gmail.com"
] | elsaeedahmed97@gmail.com |
2a81b395742d7db7d66bb03781bd253dc966537d | d7016f69993570a1c55974582cda899ff70907ec | /sdk/applicationinsights/azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/v2022_04_01/_application_insights_management_client.py | e03358a7d7f81eb9a7e45644bb061dfd5af07347 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | kurtzeborn/azure-sdk-for-python | 51ca636ad26ca51bc0c9e6865332781787e6f882 | b23e71b289c71f179b9cf9b8c75b1922833a542a | refs/heads/main | 2023-03-21T14:19:50.299852 | 2023-02-15T13:30:47 | 2023-02-15T13:30:47 | 157,927,277 | 0 | 0 | MIT | 2022-07-19T08:05:23 | 2018-11-16T22:15:30 | Python | UTF-8 | Python | false | false | 4,005 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, TYPE_CHECKING
from azure.core.rest import HttpRequest, HttpResponse
from azure.mgmt.core import ARMPipelineClient
from . import models as _models
from .._serialization import Deserializer, Serializer
from ._configuration import ApplicationInsightsManagementClientConfiguration
from .operations import WorkbooksOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class ApplicationInsightsManagementClient: # pylint: disable=client-accepts-api-version-keyword
    """Composite Swagger for Application Insights Management Client.

    Autogenerated ARM client: wires a TokenCredential-backed pipeline to the
    Workbooks operation group for api-version 2022-04-01.

    :ivar workbooks: WorkbooksOperations operations
    :vartype workbooks: azure.mgmt.applicationinsights.v2022_04_01.operations.WorkbooksOperations
    :param credential: Credential needed for the client to connect to Azure. Required.
    :type credential: ~azure.core.credentials.TokenCredential
    :param subscription_id: The ID of the target subscription. Required.
    :type subscription_id: str
    :param base_url: Service URL. Default value is "https://management.azure.com".
    :type base_url: str
    :keyword api_version: Api Version. Default value is "2022-04-01". Note that overriding this
     default value may result in unsupported behavior.
    :paramtype api_version: str
    """
    def __init__(
        self,
        credential: "TokenCredential",
        subscription_id: str,
        base_url: str = "https://management.azure.com",
        **kwargs: Any
    ) -> None:
        # Per-client configuration (credential policy, api-version, retry policy).
        self._config = ApplicationInsightsManagementClientConfiguration(
            credential=credential, subscription_id=subscription_id, **kwargs
        )
        self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
        # Every class defined in the models module is registered by name so the
        # serializer/deserializer can resolve swagger type references.
        client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)
        # Validation already happened service-side / at codegen time.
        self._serialize.client_side_validation = False
        self.workbooks = WorkbooksOperations(self._client, self._config, self._serialize, self._deserialize)
    def _send_request(self, request: HttpRequest, **kwargs: Any) -> HttpResponse:
        """Runs the network request through the client's chained policies.

        >>> from azure.core.rest import HttpRequest
        >>> request = HttpRequest("GET", "https://www.example.org/")
        <HttpRequest [GET], url: 'https://www.example.org/'>
        >>> response = client._send_request(request)
        <HttpResponse: 200 OK>

        For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request

        :param request: The network request you want to make. Required.
        :type request: ~azure.core.rest.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to False.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.rest.HttpResponse
        """
        # Deep-copy so the caller's request object is never mutated by URL formatting.
        request_copy = deepcopy(request)
        request_copy.url = self._client.format_url(request_copy.url)
        return self._client.send_request(request_copy, **kwargs)
    def close(self) -> None:
        """Close the underlying pipeline transport."""
        self._client.close()
    def __enter__(self) -> "ApplicationInsightsManagementClient":
        # Delegate context management to the pipeline client (opens transport).
        self._client.__enter__()
        return self
    def __exit__(self, *exc_details: Any) -> None:
        self._client.__exit__(*exc_details)
| [
"noreply@github.com"
] | kurtzeborn.noreply@github.com |
d1cb8b220ff10c0541a9ae9919eca7d78c4451e2 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/dev/cv/image_classification/DBPN_ID2917_for_PyTorch/dataset.py | 107b2ac1e75bf747ca40f827a1d8b65f0fbc430b | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 6,223 | py | #
# BSD 3-Clause License
#
# Copyright (c) 2017 xxxx
# All rights reserved.
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ============================================================================
#
import torch.utils.data as data
import torch
import numpy as np
import os
from os import listdir
from os.path import join
from PIL import Image, ImageOps
import random
from random import randrange
import torch.npu
import os
# Select the Ascend NPU device from the environment; default to device 0.
NPU_CALCULATE_DEVICE = 0
if os.getenv('NPU_CALCULATE_DEVICE') and str.isdigit(os.getenv('NPU_CALCULATE_DEVICE')):
    NPU_CALCULATE_DEVICE = int(os.getenv('NPU_CALCULATE_DEVICE'))
# Only switch devices when necessary to avoid a redundant set_device call.
if torch.npu.current_device() != NPU_CALCULATE_DEVICE:
    torch.npu.set_device(f'npu:{NPU_CALCULATE_DEVICE}')
def is_image_file(filename):
    """Return True when *filename* ends with a supported image extension."""
    return filename.endswith((".png", ".jpg", ".jpeg"))
def load_img(filepath):
    """Open the image at *filepath* and return it converted to RGB."""
    return Image.open(filepath).convert('RGB')
def rescale_img(img_in, scale):
    """Return *img_in* bicubically resized by *scale* in both dimensions."""
    target_size = tuple(int(dim * scale) for dim in img_in.size)
    return img_in.resize(target_size, resample=Image.BICUBIC)
def get_patch(img_in, img_tar, img_bic, patch_size, scale, ix=-1, iy=-1):
    """Crop spatially-aligned patches from the LR input, HR target and
    bicubic-upsampled images.

    A random LR crop origin is drawn when ``ix``/``iy`` are -1; the HR/bicubic
    crops use the same origin scaled by *scale* so all three stay aligned.
    Returns the three cropped images plus a dict recording the crop geometry.
    """
    # NOTE(review): PIL's Image.size is (width, height), so ih/iw look swapped
    # here — harmless as long as all crops use the same convention, but confirm.
    (ih, iw) = img_in.size
    (th, tw) = (scale * ih, scale * iw)  # HR dimensions (currently unused)
    patch_mult = scale #if len(scale) > 1 else 1
    tp = patch_mult * patch_size  # HR/bicubic patch edge length
    ip = tp // scale              # LR patch edge length
    if ix == -1:
        ix = random.randrange(0, iw - ip + 1)
    if iy == -1:
        iy = random.randrange(0, ih - ip + 1)
    # Corresponding top-left corner in HR coordinates.
    (tx, ty) = (scale * ix, scale * iy)
    img_in = img_in.crop((iy,ix,iy + ip, ix + ip))
    img_tar = img_tar.crop((ty,tx,ty + tp, tx + tp))
    img_bic = img_bic.crop((ty,tx,ty + tp, tx + tp))
    info_patch = {
        'ix': ix, 'iy': iy, 'ip': ip, 'tx': tx, 'ty': ty, 'tp': tp}
    return img_in, img_tar, img_bic, info_patch
def augment(img_in, img_tar, img_bic, flip_h=True, rot=True):
    """Apply identical random flips/rotation to all three images.

    Returns the (possibly transformed) images plus a dict logging which of
    the vertical flip, mirror, and 180-degree rotation were applied.
    """
    applied = {'flip_h': False, 'flip_v': False, 'trans': False}
    # Keep random.random() first in each condition so the RNG stream is
    # consumed identically regardless of the flag values.
    if random.random() < 0.5 and flip_h:
        img_in, img_tar, img_bic = (ImageOps.flip(im) for im in (img_in, img_tar, img_bic))
        applied['flip_h'] = True
    if rot:
        if random.random() < 0.5:
            img_in, img_tar, img_bic = (ImageOps.mirror(im) for im in (img_in, img_tar, img_bic))
            applied['flip_v'] = True
        if random.random() < 0.5:
            img_in, img_tar, img_bic = (im.rotate(180) for im in (img_in, img_tar, img_bic))
            applied['trans'] = True
    return img_in, img_tar, img_bic, applied
class DatasetFromFolder(data.Dataset):
    """Training dataset yielding (LR patch, HR patch, bicubic patch) triples.

    HR images are read from *image_dir*; the LR input is synthesized on the
    fly by bicubic downscaling with *upscale_factor*, and random aligned
    patches (optionally augmented) are cropped from each pair.
    """
    def __init__(self, image_dir, patch_size, upscale_factor, data_augmentation, transform=None):
        super(DatasetFromFolder, self).__init__()
        # All image files directly inside image_dir (non-recursive).
        self.image_filenames = [join(image_dir, x) for x in listdir(image_dir) if is_image_file(x)]
        self.patch_size = patch_size
        self.upscale_factor = upscale_factor
        self.transform = transform
        self.data_augmentation = data_augmentation
    def __getitem__(self, index):
        target = load_img(self.image_filenames[index])
        # Synthesize the LR input by bicubic downscaling of the HR target.
        input = target.resize((int(target.size[0]/self.upscale_factor),int(target.size[1]/self.upscale_factor)), Image.BICUBIC)
        bicubic = rescale_img(input, self.upscale_factor)
        # Crop aligned random patches from LR / HR / bicubic images.
        input, target, bicubic, _ = get_patch(input,target,bicubic,self.patch_size, self.upscale_factor)
        if self.data_augmentation:
            input, target, bicubic, _ = augment(input, target, bicubic)
        if self.transform:
            input = self.transform(input)
            bicubic = self.transform(bicubic)
            target = self.transform(target)
        return input, target, bicubic
    def __len__(self):
        return len(self.image_filenames)
class DatasetFromFolderEval(data.Dataset):
    """Evaluation dataset: yields (LR image, bicubic upscale, filename).

    Unlike the training dataset, images in *lr_dir* are already low
    resolution, so no cropping or augmentation is performed.
    """
    def __init__(self, lr_dir, upscale_factor, transform=None):
        super(DatasetFromFolderEval, self).__init__()
        self.image_filenames = [join(lr_dir, x) for x in listdir(lr_dir) if is_image_file(x)]
        self.upscale_factor = upscale_factor
        self.transform = transform
    def __getitem__(self, index):
        input = load_img(self.image_filenames[index])
        # Keep just the basename so callers can name their output files.
        _, file = os.path.split(self.image_filenames[index])
        bicubic = rescale_img(input, self.upscale_factor)
        if self.transform:
            input = self.transform(input)
            bicubic = self.transform(bicubic)
        return input, bicubic, file
    def __len__(self):
        return len(self.image_filenames)
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
16e946ad9dcd11ce7bf78b923c7d03ec8901301e | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /HpJCBwggQMDLWTHsM_9.py | 899cb569cb973f6979a58e5944f0756877e31ba0 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 556 | py | """
Create a function that takes in a sentence and returns the average length of
each word in that sentence. Round your result to two decimal places.
### Examples
average_word_length("A B C.") ➞ 1.00
average_word_length("What a gorgeous day.") ➞ 4.00
average_word_length("Dude, this is so awesome!") ➞ 3.80
### Notes
Ignore punctuation when counting the length of a word.
"""
def average_word_length(txt):
    """Return the mean number of letters per word, rounded to 2 decimals.

    Non-alphabetic characters (punctuation, digits) do not count toward a
    word's length, but every whitespace-separated token counts as a word.
    """
    words = txt.split()
    total_letters = sum(1 for word in words for ch in word if ch.isalpha())
    return round(total_letters / len(words), 2)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
87e1f5f4d8c3542a34727f8b44668b8a8d6c135a | 46baa88abe88e226afede3abf721c2056369745a | /articles/urls.py | 80ae9f355b51743f0982ba0a32d1efb679b0f46c | [] | no_license | almazkun/django_news_app | 267aa8775c2ffeba72e22f647b0db38f65a526ec | ae66fb1df0f87b3c52ad59546986b93a94c30083 | refs/heads/master | 2020-04-21T00:20:49.926012 | 2019-02-11T09:01:24 | 2019-02-11T09:01:24 | 169,193,677 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | from django.urls import path
from .views import (
ArticleListView,
ArticleUpdateView,
ArticleDetailView,
ArticleDeleteView,
ArticleCreateView,
)
# URL routing for the articles app: list, create, detail, edit, delete.
urlpatterns = [
    path('<int:pk>/edit/', ArticleUpdateView.as_view(), name='article_edit'),
    path('<int:pk>/', ArticleDetailView.as_view(), name='article_detail'),
    path('<int:pk>/delete/', ArticleDeleteView.as_view(), name='article_delete'),
    path('new/', ArticleCreateView.as_view(), name='article_new'),
    # Catch-all list view must come last so it doesn't shadow the routes above.
    path('', ArticleListView.as_view(), name='article_list'),
]
| [
"almaz.kun@gmail.com"
] | almaz.kun@gmail.com |
86b33544ef12bb04bf642015bf69fedbca7451c0 | 3fd6e85c36a7e9e4f9ddec163a55f3602ccfb98c | /old/imu/test_ukf_filter.py | 9fb00c012ec4fbe0edf8f64b2e68194f5d687353 | [
"Apache-2.0"
] | permissive | SiChiTong/mjmech | acc5da4ac6edd9f1446cc13e471aedeea3e1c419 | a71f35e6ad6bc9c1530a0a33d68c45d073390b79 | refs/heads/master | 2020-03-20T03:44:13.276650 | 2018-05-06T02:59:55 | 2018-05-06T03:04:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,718 | py | #!/usr/bin/python
# Copyright 2014 Josh Pieper, jjp@pobox.com. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy
from numpy import array
import unittest
import ukf_filter
class UkfFilterTest(unittest.TestCase):
    """Exercise UkfFilter on a 1-D constant-acceleration tracking problem."""
    def test_basic(self):
        # State vector is [accel, vel, pos]^T; only position is measured.
        def test_process(x, dt_s):
            # Constant-acceleration kinematics: integrate accel into velocity
            # and velocity (+ 0.5*a*dt^2) into position.
            return x + array(
                [[0],
                 [x[0,0] * dt_s],
                 [x[1,0] * dt_s + 0.5 * x[0,0] * dt_s ** 2]])
        def test_measurement(x):
            # The sensor observes the position component only.
            return array([[x[2, 0]]])
        dut = ukf_filter.UkfFilter(
            initial_state=array([[0.2], [0.0], [0.0]]),
            initial_covariance=numpy.diag([1.0, 2.0, 3.0]),
            process_function=test_process,
            process_noise=numpy.diag([0.1, 0.1, 0.1]),
            measurement_function = test_measurement,
            measurement_noise=array([[2.0]]))
        # Feed a measurement that advances by 0.5 units every 0.1 s step.
        meas = 0.5
        for x in range(200):
            meas += 0.5
            dut.update_state(0.1)
            dut.update_measurement(array([[meas]]))
        # After 200 steps the filter should track position exactly and
        # estimate the constant velocity of 0.5 units per 0.1 s.
        self.assertAlmostEqual(round(dut.state[2, 0], 2), meas)
        self.assertAlmostEqual(round(dut.state[1, 0], 2), 0.5 / 0.1)
if __name__ == '__main__':
    unittest.main()
| [
"jjp@pobox.com"
] | jjp@pobox.com |
12757534cd6969e75c3a3b1f495af6b6da5536ba | 521a6a1f121f8dd569618b96184457c7427d20a9 | /compiler/tests/04_pnand2_test.py | bc066cfc726767bc881d3b6be153618e3d8a08bc | [
"BSD-3-Clause"
] | permissive | mguthaus/OpenRAM | e9682c9148be42cdd84d115d0855ce91dae5b567 | 46c86d3bb3df82e150532ede75cbf6180a697cfd | refs/heads/master | 2021-05-02T13:43:36.618374 | 2019-10-20T00:43:33 | 2019-10-20T00:43:33 | 216,284,207 | 1 | 1 | NOASSERTION | 2019-10-19T23:48:09 | 2019-10-19T23:48:09 | null | UTF-8 | Python | false | false | 993 | py | #!/usr/bin/env python3
# See LICENSE for licensing information.
#
# Copyright (c) 2016-2019 Regents of the University of California and The Board
# of Regents for the Oklahoma Agricultural and Mechanical College
# (acting for and on behalf of Oklahoma State University)
# All rights reserved.
#
import unittest
from testutils import *
import sys,os
sys.path.append(os.getenv("OPENRAM_HOME"))
import globals
from globals import OPTS
from sram_factory import factory
import debug
class pnand2_test(openram_test):
def runTest(self):
globals.init_openram("config_{0}".format(OPTS.tech_name))
debug.info(2, "Checking 2-input nand gate")
tx = factory.create(module_type="pnand2", size=1)
self.local_check(tx)
globals.end_openram()
# run the test from the command line
if __name__ == "__main__":
(OPTS, args) = globals.parse_args()
del sys.argv[1:]
header(__file__, OPTS.tech_name)
unittest.main(testRunner=debugTestRunner())
| [
"mrg@ucsc.edu"
] | mrg@ucsc.edu |
856f691c05670bf2301b7c6348f2a85058d9f65a | 3011e024b5f31d6c747a2bd4a143bb6a0eeb1e1d | /chapter05/class_view_demo/class_view_demo/urls.py | e6f3d7b1c943bca748af711f0e39b48e0f559a19 | [] | no_license | yingkun1/python-django | a3084460a83682f3e0848d5b40c881f93961ecc2 | 08c9ed3771eb245ee9ff66f67cf28730d2675bbe | refs/heads/master | 2022-12-11T12:33:20.788524 | 2019-06-12T09:30:59 | 2019-06-12T09:30:59 | 189,977,625 | 1 | 0 | null | 2022-11-22T02:57:01 | 2019-06-03T09:43:30 | Python | UTF-8 | Python | false | false | 1,372 | py | """class_view_demo URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from . import views
from django.views.generic import TemplateView
# URL routing demonstrating class-based views (list/create/detail/about).
urlpatterns = [
    path('admin/', admin.site.urls),
    path('',views.index,name="index"),
    path('book/',views.BookListView.as_view(),name="book_list"),
    path('add_book/',views.AddBookView.as_view(),name="add_book"),
    path('book_detail/<book_id>/',views.BookDetailView.as_view(),name="book_detail"),
    # If the rendered template needs no context parameters, prefer using
    # TemplateView directly here in urls, e.g.:
    # path('about/',TemplateView.as_view(template_name="about.html"))
    path('about/',views.AboutView.as_view(),name="about"),
    path('article/',include("front.urls"))
]
| [
"925712087@qq.com"
] | 925712087@qq.com |
449468d693eff6360da1441d3305fb079152cb99 | 5fb32bc4f1de0dfd2fa22bb92108b27386d77298 | /tools/workspace/gst-plugins-ugly/repository.bzl | 2d1a8de42c4912fa94012d10f973b2255d0feff6 | [
"Apache-2.0"
] | permissive | mjbots/bazel_deps | 5415e61324c6167cba7c3c0917cad387d9e5107a | 6c9ba1867b5d0ab5e59a7f1205adfd750a6c3610 | refs/heads/master | 2023-07-24T05:19:51.945623 | 2023-07-12T18:22:50 | 2023-07-12T18:22:50 | 139,143,430 | 96 | 38 | Apache-2.0 | 2021-01-05T13:02:09 | 2018-06-29T11:52:09 | Python | UTF-8 | Python | false | false | 1,109 | bzl | # -*- python -*-
# Copyright 2018 Josh Pieper, jjp@pobox.com.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
def gst_plugins_ugly_repository(name):
    """Workspace macro: fetch gst-plugins-ugly 1.14.1 as an http_archive.

    Args:
        name: repository rule name to register the archive under.
    """
    http_archive(
        name = name,
        urls = [
            "https://gstreamer.freedesktop.org/src/gst-plugins-ugly/gst-plugins-ugly-1.14.1.tar.xz",
        ],
        # Pin the exact tarball contents for reproducible builds.
        sha256 = "cff2430bb13f54ef81409a0b3d65ce409a376d4a7bab57a14a97d602539fe1d3",
        strip_prefix = "gst-plugins-ugly-1.14.1",
        build_file = Label("//tools/workspace/gst-plugins-ugly:package.BUILD"),
    )
| [
"jjp@pobox.com"
] | jjp@pobox.com |
bd32ed5d6d14f48b505ab51db2fcca8dee046b1f | 65b55130f41747ccb239219ae9010ab06b60d430 | /src/tweets/api/pagination.py | c8bb1934c835473c8b741cc39a8030efc01de161 | [] | no_license | amrebrahem22/TweetMe-App | d5c2f5fc20565356a88fdde357433ac54bc5dfac | cad027a34c84f9b2530759ec6b080a5f80a02ffc | refs/heads/master | 2020-11-24T19:12:27.526977 | 2020-03-24T21:44:30 | 2020-03-24T21:44:30 | 228,306,534 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | py | from rest_framework import pagination
class TweetsPagination(pagination.PageNumberPagination):
    """Page-number pagination policy for tweet list endpoints."""
    # Default number of tweets per page when the client specifies nothing.
    page_size = 10
    # Query parameter that lets the client request a different page size.
    page_size_query_param = 'page_size'
    # Upper bound on any client-requested page size.
    max_page_size = 10000
"amrebrahem226@gmail.com"
] | amrebrahem226@gmail.com |
1ee7bc3cf79a503523f42dcbf8538f749df6872e | ae646229187ab11607e4889e1cf0e380b26fae5c | /test_joyce_code/data/buildSupplementDict.py | 00c80ec76f7df5ab019c7877bde0a4d4d5e6aed3 | [] | no_license | aschein/tensor_analysis | cb60caf56713cfb7191c46d3cc20c32ea591d382 | 155754be7fa8cfb97432997cb66aa37b1a7b582b | refs/heads/master | 2021-01-17T07:44:00.657311 | 2014-09-11T20:45:14 | 2014-09-11T20:45:14 | 34,183,143 | 1 | 2 | null | 2018-08-25T20:15:18 | 2015-04-18T21:19:08 | Python | UTF-8 | Python | false | false | 1,680 | py | '''
Scrape a list of supplements and herbs
'''
import urllib2
from bs4 import BeautifulSoup
import json
import string
def scrapeNIH():
    """
    Scrape the MedlinePlus Herbs & Supplements page and return a list of
    lower-cased supplement names (deduplicated, order not preserved):
    http://www.nlm.nih.gov/medlineplus/druginfo/herb_All.html

    NOTE: Python 2 code (urllib2, print statement).
    """
    supplements = []
    PAGE_URL = "http://www.nlm.nih.gov/medlineplus/druginfo/herb_All.html"
    soup = BeautifulSoup(urllib2.urlopen(PAGE_URL).read())
    # Supplement names live in <ul class="herbul"> lists as anchor text.
    ulList = soup.find_all('ul', 'herbul')
    for ul in ulList:
        for li in ul.findAll('li'):
            supplements.append(li.find('a').getText().lower())
            print li.find('a').getText()
    # set() dedup discards ordering.
    supplements = list(set(supplements))
    return supplements
def scrapeRXList():
    """
    Scrape rxlist.com's A-Z supplement index and return a dict mapping each
    supplement name to its class (the parenthesized text when present,
    otherwise the name itself). NOTE: Python 2 code (urllib2, print).
    """
    supplementDict = {}
    # One index page per letter of the alphabet.
    PAGE_URLS = ["http://www.rxlist.com/supplements/alpha_"+i+".html" for i in string.lowercase]
    for page in PAGE_URLS:
        print "Scraping page:" + str(page)
        soup = BeautifulSoup(urllib2.urlopen(page).read())
        contentMaterial = soup.find_all('div', 'contentstyle')
        for li in contentMaterial[0].findAll('li'):
            txt = li.find('a').getText() + ' '
            ## try to encode it in ascii
            txt = txt.encode('ascii', 'ignore').lower()
            suppClass = str(txt)
            # "name (class)" entries: the class is the last parenthesized
            # token; the key becomes the text before the first '('.
            if txt.find("("):
                suppClass = txt[txt.rfind("(")+1:txt.find(")")]
                txt = txt[:txt.find("(")].strip()
            supplementDict[txt] = suppClass
    ## make sure all the values are keys themselves
    vals = supplementDict.values()
    valDict = zip(vals, vals)
    supplementDict.update(valDict)
    return supplementDict
def main():
    """Scrape the rxlist supplement index and dump it to supplement.json."""
    supplements = scrapeRXList()
    with open('supplement.json', 'wb') as outfile:
        json.dump(supplements, outfile)
if __name__ == "__main__":
    main()
"robchen401@gmail.com"
] | robchen401@gmail.com |
fff06f4add7041f373fa5d4e1126cde49020c91b | 933f2a9f155b2a4f9746bf2020d1b828bfe49e81 | /面向对象编程/day1/__init__.py | fef6889ac4b4519d0d8df9ec966f87b8ff5b113e | [] | no_license | WuAlin0327/python3-notes | d65ffb2b87c8bb23d481ced100d17cda97aef698 | 1d0d66900f6c4b667b3b84b1063f24ee7823e1bb | refs/heads/master | 2020-03-26T04:49:34.937700 | 2018-12-31T11:12:58 | 2018-12-31T11:12:58 | 144,524,404 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 707 | py | class LuffyStudent:
school = 'luffycity' #数据属性
def __init__(self,name,sex,age):# __init__方法用来对象定制对象独有的特征
self.Name = name
self.Sex = sex
self.Age = age
def learn(self):# 函数属性
print('is learing')
def eat(self):
print('is eating')
# 后产生的对象
stu1 = LuffyStudent('wualin','man',29)#LuffyStudent.__init__(stu1,'wualin','man',20)
# 加上__init__方法后,实例化步骤
'''
1.txt. 先产生一个空对象stu1
2. 触发LuffyStudent.__init__(stu1,'wualin','man',29)
'''
#
# # 查
# print(stu1.Name)
# print(stu1.Sex)
# print(stu1.Age)
# # 改
# stu1.Name='520'
#
# # 删除
# del stu1.Name
#
# # 增
# stu1.class_name = 'python开发'
| [
"1032298871@qq.com"
] | 1032298871@qq.com |
b16288bd80014d9349042292618ccd0a8980cd5e | bd0fe4df0e442b02add84ae12f932a0e5511b2f5 | /product/context_processors.py | 6f296e1c0f3b727473691d21b4b4eea8a67bc7b1 | [] | no_license | yeboahd24/Simple-Ecommerce | a1bdae28ec192f2f80ee1ef615dd614e3fd4aca7 | 0cabae9f968da7d176748b0cb4feb5b7e7b9e9ab | refs/heads/main | 2023-08-02T01:01:41.791169 | 2021-09-30T20:06:35 | 2021-09-30T20:06:35 | 374,789,214 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | from product.models import Category
def menu_categories(request):
categories = Category.objects.all()
return {'menu_categories': categories} | [
"yeboahd24@gmail.com"
] | yeboahd24@gmail.com |
a3eafbc37dcd3a3bdd09cd7591251894aafe9927 | 1eefb6b82b8e8aac088da9d6e9ff40d235885b5c | /misc/local_occu_to_world_map.py | 8a71f32b768b3de9a9145541450815efe746f9af | [
"Apache-2.0"
] | permissive | danielchandg/ROAR | be513500ba9a44676ed75188933e45512c7f5bfc | a661fbf42cf72c2a8c24ec89a0fb84e77e6af561 | refs/heads/main | 2023-03-28T16:09:44.514515 | 2021-03-31T22:20:11 | 2021-03-31T22:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,785 | py | from pathlib import Path
import numpy as np
import cv2
import glob
import os
from scipy import sparse
def load_meta_data(f_path: Path) -> np.ndarray:
    """Load the global-map metadata array from the ``.npy`` file at *f_path*.

    Raises:
        FileNotFoundError: if *f_path* does not exist. (Previously this was
            an ``assert``, which is silently stripped under ``python -O``;
            an explicit exception always fires and is still caught by the
            broad ``except Exception`` bootstrap path in ``__main__``.)
    """
    if not f_path.exists():
        raise FileNotFoundError(f"{f_path} does not exist")
    return np.load(f_path.as_posix())
def create_global_occu_map(meta_data: np.ndarray, local_occu_map_dir_path: Path, regex: str) -> np.ndarray:
    """OR together per-frame sparse occupancy maps into one global boolean map.

    meta_data is [min_x, min_y, max_x, max_y, padding]; the output grid is
    sized to cover that extent plus padding on every side. Files matching
    *regex* (actually a glob pattern appended to the directory path — the
    parameter name is misleading) are combined in modification-time order,
    visualizing the accumulated map after each one.
    """
    assert local_occu_map_dir_path.exists(), f"{local_occu_map_dir_path} does not exist"
    min_x, min_y, max_x, max_y, map_additiona_padding = meta_data
    x_total = max_x - min_x + 2 * map_additiona_padding
    y_total = max_y - min_y + 2 * map_additiona_padding
    curr_map = np.zeros(shape=(x_total, y_total),
                        dtype=np.float16)
    # mtime ordering replays the maps in the order they were recorded.
    file_paths = sorted(glob.glob((local_occu_map_dir_path.as_posix() + regex)), key=os.path.getmtime)
    for fpath in file_paths:
        data = sparse.load_npz(fpath).toarray()
        # data = np.load(fpath)
        # Any cell occupied in any frame stays occupied in the global map.
        curr_map = np.logical_or(data, curr_map)
        visualize(curr_map)
    return curr_map
def visualize(m: np.ndarray, wait_key=1):
    """Show the occupancy map *m* in a 500x500 OpenCV window.

    wait_key=1 refreshes without blocking; wait_key=0 blocks until a key press.
    """
    # cv2.imshow needs float32; float16/bool inputs are not displayable.
    m = np.float32(m)
    cv2.imshow("map", cv2.resize(m, dsize=(500, 500)))
    cv2.waitKey(wait_key)
if __name__ == "__main__":
    meta_data_folder_path = Path("../data/output/occupancy_map/")
    meta_data_file_path = meta_data_folder_path / "meta_data.npy"
    try:
        meta_data: np.ndarray = load_meta_data(meta_data_file_path)
        global_occu_map = create_global_occu_map(meta_data, meta_data_folder_path, regex="/03_*.npz")
        print("Press any key to exit")
        visualize(global_occu_map, wait_key=0)
    except Exception as e:
        # First-run bootstrap: if loading/combining fails (typically because
        # the metadata file does not exist yet), write a default extent so
        # the next run can proceed.
        meta_data = np.array([-550, -550, 550, 550, 40])
        np.save(meta_data_file_path.as_posix(), meta_data)
        print(f"Meta data {meta_data} Saved")
| [
"wuxiaohua1011@berkeley.edu"
] | wuxiaohua1011@berkeley.edu |
db1befcc09293bbdb4053444eb972c6ea37f890f | b87f66b13293782321e20c39aebc05defd8d4b48 | /convert/h52txt.py | a546ad8345b9bdc9034fc1027919ae1440a430dc | [] | no_license | m-elhussieny/code | 5eae020932d935e4d724c2f3d16126a0d42ebf04 | 5466f5858dbd2f1f082fa0d7417b57c8fb068fad | refs/heads/master | 2021-06-13T18:47:08.700053 | 2016-11-01T05:51:06 | 2016-11-01T05:51:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 510 | py | """
Convert 2D array HDF5 files to raw ASCII format.
Fernando Paolo <fpaolo@ucsd.edu>
January 1, 2010
"""
import numpy as np
import tables as tb
import os
import sys
# NOTE: Python 2 script (print statements, PyTables' old openFile API).
files = sys.argv[1:]
if len(files) < 1:
    print 'usage: python %s infiles.txt' % sys.argv[0]
    sys.exit()
print 'converting files: %d... ' % len(files)
for f in files:
    # Each HDF5 file holds its 2-D array in the /data node.
    h5f = tb.openFile(f, 'r')
    data = h5f.root.data.read()
    h5f.close()
    # Write alongside the input, swapping the extension for .txt.
    np.savetxt(os.path.splitext(f)[0] + '.txt', data, fmt='%f')
print 'done!'
| [
"fspaolo@gmail.com"
] | fspaolo@gmail.com |
ab72ee0b427d9eb3f188b6eab9a7fa7f2fe882d9 | e15653ec81af4e6ee2e46e966bcef3e82ca40337 | /examples/fiv.py | adc1c5934881cac06dfedd23ee84c8a897afea31 | [] | no_license | ktdreyer/rhcephbugs | fab340619cf16cce2b45200c5a57e9a9087f82a2 | 72601a61393b3df4dd3ad4f2996cfba3b7f2b086 | refs/heads/master | 2023-04-27T10:00:58.049310 | 2023-04-13T20:57:30 | 2023-04-13T21:06:47 | 80,675,653 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,480 | py | import re
from rhcephbugs.fixed_in_version import FixedInVersion, Build
import bugzilla
# Pretend these values came from CI:
branch = 'ceph-2-rhel-7'  # dist-git branch that was built
build = 'ceph-10.2.5-1.el7cp'  # NVR of the resulting build
# Pretend this bug number came from a library that can parse CI messages for
# rhbz ID numbers:
ids = [1367539]
# Staging Bugzilla by default; switch to production when ready.
BZURL = 'partner-bugzilla.redhat.com'
# BZURL = 'bugzilla.redhat.com'
def get_distro(branch):
    """Map a dist-git branch name to its distro family.

    :param branch: branch name, e.g. ``ceph-2-rhel-7`` or ``ceph-2-xenial``.
    :returns: ``'RHEL'`` for ``-rhel-<N>`` suffixes, ``'Ubuntu'`` for
        ``-ubuntu``/``-trusty``/``-xenial`` suffixes.
    :raises RuntimeError: if the branch matches neither pattern.
    """
    # Raw strings: '-rhel-\d+$' as a plain literal relies on Python not
    # interpreting '\d', which is a DeprecationWarning (and eventually an
    # error) in Python 3.
    if re.search(r'-rhel-\d+$', branch):
        return 'RHEL'
    if re.search(r'-(?:ubuntu|trusty|xenial)$', branch):
        return 'Ubuntu'
    raise RuntimeError('unknown distro in branch %s' % branch)
def update_fiv(bzapi, ids, build):
    """Merge *build* into the Fixed In Version field of each bug in *ids*.

    Skips bugs whose field already contains the merged value; otherwise
    writes the updated field back through the Bugzilla API.

    NOTE(review): the distro comes from the module-level global ``branch``,
    not from a parameter — callers must set it before invoking this.
    """
    bugs = bzapi.getbugs(ids, include_fields=['id', 'fixed_in'])
    for bug in bugs:
        url = 'https://%s/%d' % (BZURL, bug.id)
        # Parse the existing field and merge in the new build for this distro.
        fiv = FixedInVersion(bug.fixed_in)
        new = Build.factory(build, get_distro(branch))
        fiv.update(new)
        if bug.fixed_in == str(fiv):
            print('%s Fixed In Version is already set to "%s"' % (url, fiv))
            continue
        print('%s changing Fixed In Version "%s" to "%s"' % (url, bug.fixed_in,
                                                             fiv))
        update = bzapi.build_update(fixed_in=str(fiv))
        bzapi.update_bugs(bug.id, update)
if __name__ == '__main__':
    # Requires an existing login token (see ~/.bugzillatoken); bail out
    # early rather than fail mid-update.
    bzapi = bugzilla.Bugzilla(BZURL)
    if not bzapi.logged_in:
        raise SystemExit('Not logged into %s. See ~/.bugzillatoken.' % BZURL)
    update_fiv(bzapi, ids, build)
| [
"kdreyer@redhat.com"
] | kdreyer@redhat.com |
7981bbedcf212c97629525d6fabc949ce97fad7a | 10e1c07d665f9304d5ffd7033c64a164ea2a3ad9 | /Django_Backend/AuthUser/migrations/0002_auto_20181027_1939.py | a48988b7ec0e123dcbed5c9a9b1da32570d1e24a | [] | no_license | garvitkataria/AI_Hackathon_Server_Code | fba56605b25f4698110ebf92aa21809ebdcec462 | 1feee4122615bf0d1384889625a62db84c9ddb8b | refs/heads/master | 2023-01-11T11:26:00.619674 | 2019-09-29T06:09:07 | 2019-09-29T06:09:07 | 207,013,287 | 0 | 0 | null | 2022-12-31T03:04:05 | 2019-09-07T19:00:28 | Python | UTF-8 | Python | false | false | 358 | py | # Generated by Django 2.1.2 on 2018-10-27 19:39
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the User.is_faculty field to is_farmer."""
    dependencies = [
        ('AuthUser', '0001_initial'),
    ]
    operations = [
        migrations.RenameField(
            model_name='user',
            old_name='is_faculty',
            new_name='is_farmer',
        ),
    ]
| [
"garvit.k16@iiits.in"
] | garvit.k16@iiits.in |
664956ece2799c6d70ff0cd93d28d24230d21daa | afe5c625d818a85598785b43089b65ebf950cb15 | /template_lib/examples/test_graphviz.py | 90c7ba54ab8c3b08038a44ab62f903b40e416cb1 | [] | no_license | weroks/Omni-GAN-DGP | 52039dafa041bd977debba69c1a1d30094e8bfcc | d85898d0634c63f3176a21e3e398aea0a88f2634 | refs/heads/main | 2023-08-20T06:33:18.307812 | 2021-10-28T14:17:00 | 2021-10-28T14:17:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,942 | py | import os
import sys
import unittest
import argparse
from template_lib.examples import test_bash
from template_lib import utils
class TestingGraphviz(unittest.TestCase):
    """Smoke tests rendering the classic graphviz gallery examples.

    Each test writes its graph under ``results/Graphviz/<command>`` and opens
    the rendered PNG with ``view()``. The identical ~15 lines of env/outdir
    boilerplate previously duplicated in every test now live in
    :meth:`_prepare_outdir`.
    """

    def _prepare_outdir(self, func_name):
        """Shared per-test setup; returns a freshly recreated output dir.

        Sets default env vars, derives ``results/<class>/<command>`` from the
        test function name (``test_`` prefix stripped) and the class name
        (``Testing`` prefix stripped), then wipes and recreates it.
        """
        import shutil
        if 'CUDA_VISIBLE_DEVICES' not in os.environ:
            os.environ['CUDA_VISIBLE_DEVICES'] = '0'
        if 'PORT' not in os.environ:
            os.environ['PORT'] = '6006'
        if 'TIME_STR' not in os.environ:
            os.environ['TIME_STR'] = '0' if utils.is_debugging() else '1'
        assert func_name.startswith('test_')
        command = func_name[5:]
        class_name = self.__class__.__name__[7:] \
            if self.__class__.__name__.startswith('Testing') \
            else self.__class__.__name__
        outdir = f'results/{class_name}/{command}'
        shutil.rmtree(outdir, ignore_errors=True)
        os.makedirs(outdir, exist_ok=True)
        return outdir

    def test_hello(self):
        """Minimal two-node directed graph."""
        outdir = self._prepare_outdir(sys._getframe().f_code.co_name)
        from graphviz import Digraph
        g = Digraph('G', filename=os.path.join(outdir, 'hello'), format='png')
        g.edge('Hello', 'World')
        g.view()

    def test_process(self):
        """Undirected process graph (graphviz 'process' example)."""
        outdir = self._prepare_outdir(sys._getframe().f_code.co_name)
        from graphviz import Graph
        g = Graph('G', filename=os.path.join(outdir, 'hello'), format='png')
        for tail, head in [
                ('run', 'intr'), ('intr', 'runbl'), ('runbl', 'run'),
                ('run', 'kernel'), ('kernel', 'zombie'), ('kernel', 'sleep'),
                ('kernel', 'runmem'), ('sleep', 'swap'), ('swap', 'runswap'),
                ('runswap', 'new'), ('runswap', 'runmem'), ('new', 'runmem'),
                ('sleep', 'runmem')]:
            g.edge(tail, head)
        g.view()

    def test_fsm(self):
        """Finite-state machine with doublecircle accepting states."""
        outdir = self._prepare_outdir(sys._getframe().f_code.co_name)
        from graphviz import Digraph
        f = Digraph('finite_state_machine',
                    filename=os.path.join(outdir, 'hello'), format='png')
        f.attr(rankdir='LR', size='8,5')
        # Accepting states are drawn as double circles.
        f.attr('node', shape='doublecircle')
        for state in ('LR_0', 'LR_3', 'LR_4', 'LR_8'):
            f.node(state)
        f.attr('node', shape='circle')
        for tail, head, label in [
                ('LR_0', 'LR_2', 'SS(B)'), ('LR_0', 'LR_1', 'SS(S)'),
                ('LR_1', 'LR_3', 'S($end)'), ('LR_2', 'LR_6', 'SS(b)'),
                ('LR_2', 'LR_5', 'SS(a)'), ('LR_2', 'LR_4', 'S(A)'),
                ('LR_5', 'LR_7', 'S(b)'), ('LR_5', 'LR_5', 'S(a)'),
                ('LR_6', 'LR_6', 'S(b)'), ('LR_6', 'LR_5', 'S(a)'),
                ('LR_7', 'LR_8', 'S(b)'), ('LR_7', 'LR_5', 'S(a)'),
                ('LR_8', 'LR_6', 'S(b)'), ('LR_8', 'LR_5', 'S(a)')]:
            f.edge(tail, head, label=label)
        f.view()

    def test_cluster(self):
        """Two cluster subgraphs linked by a start/end skeleton."""
        outdir = self._prepare_outdir(sys._getframe().f_code.co_name)
        from graphviz import Digraph
        g = Digraph('G', filename=os.path.join(outdir, 'hello'), format='png')
        # NOTE: the subgraph name needs to begin with 'cluster' (all lowercase)
        # so that Graphviz recognizes it as a special cluster subgraph
        with g.subgraph(name='cluster_0') as c:
            c.attr(style='filled', color='lightgrey')
            c.node_attr.update(style='filled', color='white')
            c.edges([('a0', 'a1'), ('a1', 'a2'), ('a2', 'a3')])
            c.attr(label='process #1')
        with g.subgraph(name='cluster_1') as c:
            c.attr(color='blue')
            c.node_attr['style'] = 'filled'
            c.edges([('b0', 'b1'), ('b1', 'b2'), ('b2', 'b3')])
            c.attr(label='process #2')
        g.edge('start', 'a0')
        g.edge('start', 'b0')
        g.edge('a1', 'b3')
        g.edge('b2', 'a3')
        g.edge('a3', 'a0')
        g.edge('a3', 'end')
        g.edge('b3', 'end')
        g.node('start', shape='Mdiamond')
        g.node('end', shape='Msquare')
        g.view()

    def test_rank_same(self):
        """Force node groups onto the same rank via rank='same' subgraphs."""
        outdir = self._prepare_outdir(sys._getframe().f_code.co_name)
        from graphviz import Digraph
        d = Digraph('G', filename=os.path.join(outdir, 'hello'), format='png')
        with d.subgraph() as s:
            s.attr(rank='same')
            s.node('A')
            s.node('X')
        d.node('C')
        with d.subgraph() as s:
            s.attr(rank='same')
            s.node('B')
            s.node('D')
            s.node('Y')
        d.edges(['AB', 'AC', 'CD', 'XY'])
        d.view()
| [
"zhoupengcv@sjtu.edu.cn"
] | zhoupengcv@sjtu.edu.cn |
4b3e7223a2519962a38b27c71678023ccb425d4a | d02508f5ebbbdb4ba939ba830a8e8d9abc69774a | /Implementation/beautifulTriplets.py | 6d68d2dc8232b8c956171e12e015bb5b1dc96efe | [] | no_license | sameersaini/hackerank | e30c6270aaa0e288fa8b25392819509849cdabad | 3e66f89e02ade703715237722eda2fa2b135bb79 | refs/heads/master | 2021-06-12T09:24:15.266218 | 2019-10-18T02:22:00 | 2019-10-18T02:22:00 | 31,360,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 477 | py | #!/bin/python3
import os
# Complete the beautifulTriplets function below.
def beautifulTriplets(d, arr):
    """Count triplets (x, x + d, x + 2*d) whose members all appear in arr.

    The original tested membership against the list itself, which is O(n)
    per lookup and O(n^2) overall; a set makes each lookup O(1) for an
    overall O(n) scan with identical results.
    """
    present = set(arr)
    return sum(1 for x in arr if x + d in present and x + 2 * d in present)
if __name__ == '__main__':
    # HackerRank harness: read n and d, then the array, and write the count.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')

    first_line = input().split()
    n = int(first_line[0])
    d = int(first_line[1])

    arr = list(map(int, input().rstrip().split()))

    result = beautifulTriplets(d, arr)

    fptr.write(str(result) + '\n')
fptr.close() | [
"sameersaini40@gmail.com"
] | sameersaini40@gmail.com |
bda379d481f9d5aa07a6d4fcb8d7ab28d72843c6 | a2b20597759990445081057d35d113434cfcf970 | /stubs/typeshed/typeshed/stdlib/multiprocessing/queues.pyi | 7ba17dcfbe0583d4395d2ede1a5f005137d3083b | [
"MIT",
"Apache-2.0"
] | permissive | facebook/pyre-check | 34059599c02b65605c574f13555229f3b931fd4e | fe8ccedc572cc1faa1fd01e9138f65e982875002 | refs/heads/main | 2023-09-03T19:10:11.587028 | 2023-09-02T07:40:35 | 2023-09-02T07:40:35 | 110,274,488 | 6,703 | 575 | MIT | 2023-09-13T17:02:32 | 2017-11-10T17:31:36 | OCaml | UTF-8 | Python | false | false | 1,238 | pyi | import queue
import sys
from typing import Any, Generic, TypeVar
if sys.version_info >= (3, 9):
from types import GenericAlias
__all__ = ["Queue", "SimpleQueue", "JoinableQueue"]
_T = TypeVar("_T")
class Queue(queue.Queue[_T]):
    # Typed stub only: declares the multiprocessing Queue API surface;
    # the actual implementation lives in multiprocessing.queues at runtime.
    # FIXME: `ctx` is a circular dependency and it's not actually optional.
    # It's marked as such to be able to use the generic Queue in __init__.pyi.
    def __init__(self, maxsize: int = 0, *, ctx: Any = ...) -> None: ...
    def get(self, block: bool = True, timeout: float | None = None) -> _T: ...
    def put(self, obj: _T, block: bool = True, timeout: float | None = None) -> None: ...
    def put_nowait(self, item: _T) -> None: ...
    def get_nowait(self) -> _T: ...
    def close(self) -> None: ...
    def join_thread(self) -> None: ...
    def cancel_join_thread(self) -> None: ...
# Inherits the full Queue interface declared above; no extra members stubbed.
class JoinableQueue(Queue[_T]): ...
class SimpleQueue(Generic[_T]):
    # Stub for multiprocessing SimpleQueue; close() and __class_getitem__
    # are declared only for Python >= 3.9 via the version guards below.
    def __init__(self, *, ctx: Any = ...) -> None: ...
    if sys.version_info >= (3, 9):
        def close(self) -> None: ...
    def empty(self) -> bool: ...
    def get(self) -> _T: ...
    def put(self, item: _T) -> None: ...
    if sys.version_info >= (3, 9):
        def __class_getitem__(cls, item: Any) -> GenericAlias: ...
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
ad4f84c5b22032880bc78e204e2fddfdeba69aec | bec2ccc5f19575518649932fb3f2853adf54c11e | /blog/static_file/static_file/static_file/myapp/templatetags/myfilter.py | 6f40debee780c4c407a83b4bc88233fa4d5bc915 | [] | no_license | liuxinqiqi/djangosite | 08831c63c5fa5a4c8a14dd4bf8beed62138eb58a | 9a1b425cbdb73feb34d7fb1f60c3f2923e262d64 | refs/heads/master | 2022-12-13T11:00:07.039593 | 2017-08-12T08:40:03 | 2017-08-12T08:40:03 | 100,082,409 | 0 | 0 | null | 2022-12-08T00:43:01 | 2017-08-12T01:55:14 | JavaScript | UTF-8 | Python | false | false | 388 | py | # coding=utf-8
from django import template
register = template.Library()
# Template filter converting a date's month number to the matching Chinese
# numeral, e.g. month 8 renders as 八.
# @register.filter
def month_to_upper(key):
    numerals = ('一', '二', '三', '四', '五', '六', '七', '八', '九', '十', '十一', '十二')
    return numerals[key.month - 1]
# Register the filter with the template library under 'month_to_upper'.
register.filter('month_to_upper', month_to_upper)
| [
"XinQi_Liu@outlook.com"
] | XinQi_Liu@outlook.com |
3e8113768b6f7f970999769e54013464cfb82d4d | 42a0760a051935b2e765d57c445235221a28f49e | /problemSets/top75/383.py | 46c7213de37ce6088699552420409617f18ed531 | [] | no_license | Th3Lourde/l33tcode | 3bea3a3e7c633a2d5a36f7d76d5d776d275d8ee3 | eb6b11f97a022b66716cb3890cc56c58f62e8aa4 | refs/heads/master | 2022-12-22T19:05:04.384645 | 2022-12-18T19:38:46 | 2022-12-18T19:38:46 | 232,450,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 454 | py | class Solution:
def canConstruct(self, ransomNote, magazine):
d = {}
for chr in magazine:
if chr in d:
d[chr] += 1
else:
d[chr] = 1
for chr in ransomNote:
if chr in d:
if d[chr] > 0:
d[chr] -= 1
else:
return False
else:
return False
return True
| [
"th3lourde@Eli.local"
] | th3lourde@Eli.local |
6e8233795f475c68be39cde2527a85c241ecb3fa | b9b15de6abaf44d14f94cacbc7a0df4c66ea7c83 | /43.py | 51e58b3b564b7f82849a4241918883925cabe040 | [] | no_license | humachine/pe | 96ded174431031e4ca7c9c83401495148257b903 | 01d8b33174f4e100838d040c1bd401e066bb768a | refs/heads/master | 2020-04-06T06:23:54.672102 | 2015-07-28T22:40:10 | 2015-07-28T22:40:10 | 38,980,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | import itertools
# Partial Project-Euler-43-style search: enumerate permutations of a fixed
# digit set and print those passing the divisibility filters.
# Fixes: removed the unused `total` accumulator and switched the Python 2
# `print j` statement to `print(j)`, which behaves identically on 2 and 3.
N = ['0', '1', '3', '4', '6']
a = list(itertools.permutations(N))
for i in a:
    j = ''.join(i)
    if j[0] == '0':  # skip numbers with a leading zero
        continue
    if int(j[3]) % 2 != 0:
        continue
    if int(j[2:5]) % 3 != 0:
        continue
    if int(j[-1] + '57') % 3 != 0:
        continue
    print(j)
| [
"swarun@gmail.com"
] | swarun@gmail.com |
9d2f30bb06a792619fb6a794b828b783ff70cbed | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2397/60618/320984.py | 9a61980142d5804459be4101d887d5bb42b2f36c | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 246 | py | a=int(input())
b = int(input())
# Hard-coded judge answers: the pair (3, 19) is the only case that also
# depends on the second input; every other known `a` maps straight to its
# expected output, and unknown values are echoed back unchanged.
if a == 3 and b == 19:
    print(17)
else:
    lookup = {7: 15, 12: 15, 3: 32, 1: 4, 15: 704, 32: 10}
    print(lookup.get(a, a))
#print(17)
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
c9ac7a3e3c7954ce57d886098b7c8e74f60df27b | 3d9eb7e24090adff31862a3e6614217d76ff60f7 | /testing/sample.py | 3ce9262b13f8890eafc45edb9d1db51e909e5cf8 | [] | no_license | asvetlov/articles | 4ec342d3346b77897b4d2ecf3c1eb170859edabb | e9a8d35e755221a0b22c8a99b8680e5ef8baa80f | refs/heads/master | 2016-09-08T01:59:11.800396 | 2015-06-03T08:46:54 | 2015-06-03T08:46:54 | 19,406,486 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,274 | py | from datetime import datetime, timedelta
class Billing(object):
    """Remembers its creation time; content may be shown for five seconds."""

    # Class attribute so a test can substitute a fake clock per instance.
    now = datetime.now

    def __init__(self):
        self.timestamp = self.now()

    def can_show(self):
        age = self.now() - self.timestamp
        return age < timedelta(seconds=5)
#### Test
import unittest
import mocker
class TestBillling(unittest.TestCase):
    # Verifies Billing.can_show() by swapping Billing.now for a mocker mock
    # that replays three timestamps: t, t+4s (inside the 5 second window from
    # Billing.can_show) and t+8s (outside it).
    def setUp(self):
        self.mocker = mocker.Mocker()
    def tearDown(self):
        self.mocker = None
    def test_can_show(self):
        billing = Billing()
        now = self.mocker.mock()
        stamp = billing.timestamp
        billing.now = now
        # mocker setup: record the expected calls and their canned results
        with self.mocker.order():
            # first call - just now
            now()
            self.mocker.result(stamp)
            # after 4 seconds
            now()
            self.mocker.result(stamp + timedelta(seconds=4))
            # after next 4 seconds
            now()
            self.mocker.result(stamp + timedelta(seconds=8))
        # test replay: expectations above are consumed in order
        with self.mocker:
            # first call
            self.assertEqual(True, billing.can_show())
            # second call
            self.assertEqual(True, billing.can_show())
            # third call
            self.assertEqual(False, billing.can_show())
unittest.main()  # Run the test case above when this file is executed.
| [
"andrew.svetlov@gmail.com"
] | andrew.svetlov@gmail.com |
3c47065803c2c70d16235f16edd5bb25405c0b57 | 1e12a6f1957dc47c50845a39d626ea9a1a541268 | /backend/articles/urls.py | 82913153c2c86d190d1335b0ed5e486cb3d0834f | [] | no_license | sungguenja/fincat-findog | 6e7d276bcd8853300916987f70b0d159ba5cff4d | c62d17f64f4f1e8d86a982feb4842d3729b587c5 | refs/heads/master | 2023-01-03T02:11:56.612927 | 2020-10-24T16:47:48 | 2020-10-24T16:47:48 | 306,927,382 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | from django.urls import path
from . import views
app_name = 'articles'
# URL routes for the articles app; all handlers come from the sibling
# views module imported above.
urlpatterns = [
    path('city/', views.city_list, name='city'),
    path('borough/', views.borough_list, name='borough'),
    path('species/<int:animal_pk>/', views.species_list, name='species'),
    path('animal/', views.animal_list, name='animal'),
    path('myarticles/', views.article_list, name="article_list"),
    path('search_api/',views.search_api, name="search_api"),
]
| [
"59605197+sungguenja@users.noreply.github.com"
] | 59605197+sungguenja@users.noreply.github.com |
933927f9173118bff7796bb05e32536c956cf44d | b0bd3342c244ebf30ae5ab29daa078f2b39010f7 | /EmbedModel.py | e4d22900e04f20f54fcf83aa983ee4d2d26e26bb | [] | no_license | naiqili/itime_learning | 30a8af7f1234277162ccdd4c69cd9f9a4a7ab412 | d9b191bb32a7e49cb99443d7dccea5bb392aee90 | refs/heads/master | 2021-06-19T04:54:06.239320 | 2017-06-26T13:35:39 | 2017-06-26T13:35:39 | 92,792,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,151 | py | import tensorflow as tf
import numpy as np
class EmbedModel():
    """Embedding-based scoring model built as a TensorFlow 1.x graph.

    Precomputed matrices (user-item-feature scores, user/item embeddings and
    item features) are loaded from disk in __init__, registered as frozen TF
    variables by add_variables(), and combined into relevance + diversity
    scores, a prediction op and a softmax loss by build_model().
    """
    def __init__(self, conf):
        # Load every precomputed matrix referenced by the configuration.
        self.conf = conf
        self.uif_mat = np.load(conf.uif_path)
        self.embed_user_mat = np.load("%sembed_user.npy" % conf.embedPath)
        self.embed_item_mat = np.load("%sembed_item.npy" % conf.embedPath)
        self.context_user_mat = np.load("%scontext_user.npy" % conf.embedPath)
        self.context_item_mat = np.load("%scontext_item.npy" % conf.embedPath)
        self.feat_mat = np.load("%sfeat_mat.npy" % conf.featMatDir)
    def add_variables(self, reuse=False):
        """Create graph variables and placeholders.

        The 'Fixed' scope holds the loaded matrices as non-trainable
        variables; the 'Weights' scope holds the trainable parameters.
        """
        conf = self.conf
        with tf.variable_scope('Fixed', reuse=reuse):
            self.uif = tf.get_variable('uif',
                                       [conf.user_size,
                                        conf.item_size,
                                        len(conf.recAlgos)],
                                       initializer=tf.constant_initializer(self.uif_mat),
                                       trainable=False)
            self.embed_user = tf.get_variable('embed_user',
                                              [conf.user_size,
                                               conf.embed_size],
                                              initializer=tf.constant_initializer(self.embed_user_mat),
                                              trainable=False)
            self.embed_item = tf.get_variable('embed_item',
                                              [conf.item_size,
                                               conf.embed_size],
                                              initializer=tf.constant_initializer(self.embed_item_mat),
                                              trainable=False)
            self.context_user = tf.get_variable('context_user',
                                                [conf.user_size,
                                                 conf.embed_size],
                                                initializer=tf.constant_initializer(self.context_user_mat),
                                                trainable=False)
            self.context_item = tf.get_variable('context_item',
                                                [conf.item_size,
                                                 conf.embed_size],
                                                initializer=tf.constant_initializer(self.context_item_mat),
                                                trainable=False)
            self.feat_embed = tf.get_variable('feat',
                                              [conf.item_size,
                                               conf.feat_size],
                                              initializer=tf.constant_initializer(self.feat_mat),
                                              trainable=False)
        # Optional dropout over the frozen embeddings (training only).
        if self.conf.drop_embed:
            self.embed_user = tf.contrib.layers.dropout(self.embed_user, self.conf.keep_prob, is_training=self.conf.is_training)
            self.embed_item = tf.contrib.layers.dropout(self.embed_item, self.conf.keep_prob, is_training=self.conf.is_training)
            self.context_user = tf.contrib.layers.dropout(self.context_user, self.conf.keep_prob, is_training=self.conf.is_training)
            self.context_item = tf.contrib.layers.dropout(self.context_item, self.conf.keep_prob, is_training=self.conf.is_training)
        # Joint representations: embedding + context, plus item features.
        self.item_joint_embed = tf.concat([self.embed_item, self.context_item], 1)
        self.user_joint_embed = tf.concat([self.embed_user, self.context_user], 1)
        self.item_feat_joint_embed = tf.concat([self.item_joint_embed, self.feat_embed], 1)
        with tf.variable_scope('Weights', reuse=reuse):
            self.v1 = tf.get_variable('v1',
                                      [len(conf.recAlgos), 1])
            self.v2 = tf.get_variable('v2',
                                      [conf.z_size, 1])
            self.W_z = tf.get_variable('W_z',
                                       [conf.z_size,
                                        2*conf.embed_size+conf.feat_size,
                                        2*conf.embed_size+conf.feat_size])
            self.W_rel = tf.get_variable('W_rel',
                                         [2*conf.embed_size,
                                          2*conf.embed_size])
        # Inputs: already-selected items, the candidate set, the label
        # and the active user.
        self.ph_selected_items = tf.placeholder(tf.int32, shape=(None,))
        self.ph_all_items = tf.placeholder(tf.int32, shape=(None,))
        self.ph_groundtruth = tf.placeholder(tf.int32, shape=[])
        self.ph_user = tf.placeholder(tf.int32, shape=[])
    def build_model(self):
        """Wire relevance/diversity scoring, prediction and loss ops."""
        uif_u = self.uif[self.ph_user]
        if self.conf.drop_matrix:
            uif_u = tf.contrib.layers.dropout(uif_u, self.conf.keep_prob, is_training=self.conf.is_training) # Add dropout layer
        rel_score1 = tf.matmul(uif_u, self.v1)
        user_embed_u = tf.expand_dims(tf.nn.embedding_lookup(self.user_joint_embed, self.ph_user), 1)
        rel_score2 = tf.matmul(tf.matmul(self.item_joint_embed, self.W_rel), user_embed_u)
        rel_score = rel_score1 + rel_score2
        def fn_i0(): # (choices, score_sum) when i = 0
            return (self.ph_all_items, tf.squeeze(rel_score))
        def fn_not_i0(): # (choices, score_sum) when i != 0
            selected_items = self.ph_selected_items
            iur = self.item_feat_joint_embed
            if self.conf.drop_matrix:
                iur = tf.contrib.layers.dropout(iur, self.conf.keep_prob, is_training=self.conf.is_training) # Add dropout layer
            se = tf.nn.embedding_lookup(iur, selected_items)
            se = tf.transpose(se)
            # see test/einsum_test.py
            iur_w = tf.einsum('nu,zud->znd', iur, self.W_z)
            iur_w_se = tf.einsum('znu,uk->znk', iur_w, se)
            mp_iur_w_se = tf.reduce_max(iur_w_se, axis=2) # z x n
            mp_iur_w_se = tf.transpose(mp_iur_w_se) # n x z
            mp_iur_w_se = tf.tanh(mp_iur_w_se)
            div_score = tf.matmul(mp_iur_w_se, self.v2) # n x 1
            score_sum = tf.squeeze(rel_score + div_score) # vec of n
            choices = tf.reshape(tf.sparse_tensor_to_dense(tf.sets.set_difference([self.ph_all_items], [selected_items])), [-1]) # vec of remaining choices
            return (choices, score_sum)
        # Branch on whether anything has been selected yet.
        i = tf.shape(self.ph_selected_items)[0]
        choices, score_sum = tf.cond(tf.equal(i, 0),
                                     lambda: fn_i0(),
                                     lambda: fn_not_i0())
        eff_score = tf.gather(score_sum, choices, validate_indices=False) # vec of choices
        _argmax = tf.argmax(eff_score, axis=0)
        _pred = tf.gather(choices, _argmax, validate_indices=False)
        _loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=score_sum, labels=self.ph_groundtruth)
        self.loss = _loss
        self.pred = _pred
        self.loss_summary = tf.summary.scalar('Loss', self.loss)
        if self.conf.is_training:
            self.train_op = tf.train.AdamOptimizer(self.conf.lr).minimize(self.loss)
| [
"naiqil@student.unimelb.edu.au"
] | naiqil@student.unimelb.edu.au |
e29b0eccefaf4aa55fcb3248506c534cda082e6c | 0a0bf0c955e98ffebf0bee81496291e984366887 | /maxinai/letters/letters_service_adv.py | b5cdf1e213ec9d84db3b0976a28c42d43b42af98 | [] | no_license | MaxinAI/school-of-ai | 11ee65c935638b8bb9f396f25c943bd6e8e7fc0f | 3c8f11ae6cb61df186d4dfa30fa5aba774bfbeba | refs/heads/master | 2023-01-22T17:24:33.208956 | 2023-01-20T14:49:40 | 2023-01-20T14:49:40 | 212,200,415 | 52 | 77 | null | 2020-04-10T07:15:06 | 2019-10-01T21:11:52 | Jupyter Notebook | UTF-8 | Python | false | false | 3,667 | py | """
Created on Nov 15, 2017
Service for model interface
@author: Levan Tsinadze
"""
import logging
import numpy as np
import PIL
import torch
from flask import Flask, json, render_template, request
from maxinai.letters.image_reader import request_file
from maxinai.letters.service_config import configure
from torch import nn, no_grad
from torchvision import transforms
# Module-level logger for this service.
logger = logging.getLogger(__name__)
# Preprocessing applied to each incoming image before inference.
# NOTE(review): the mean/std constants look like MNIST statistics — confirm
# they match the normalization used at training time.
tfms = transforms.Compose([transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))])
# JSON key under which the predicted class label is returned.
_PREDICTION_KEY = 'prediction'
# Initializes web container
app = Flask(__name__)
class ModelWrapper(object):
    """Couples an eval-mode model with its preprocessing for inference.

    Fix: the original applied @torch.inference_mode() to the class itself,
    which wraps the class object in a function and breaks normal class
    semantics (e.g. isinstance checks); the decorator now guards forward()
    directly, replacing the weaker @no_grad().
    """

    def __init__(self, model: nn.Module, trfms: transforms):
        self.model = model.eval()
        self.trfms = trfms

    @torch.inference_mode()
    def forward(self, *imgs: PIL.Image) -> np.ndarray:
        """Transform and stack the images, run the model, return numpy scores."""
        itns = torch.stack([self.trfms(x) for x in imgs])
        otns = self.model(itns)
        results = otns.cpu().data.numpy()
        return results

    def __call__(self, *args, **kwargs):
        return self.forward(*args, **kwargs)
def init_wrapper():
    """Deserialize the trained model from disk and wrap it for CPU inference.

    Returns:
        wrapper: ModelWrapper around the loaded network
    """
    network = torch.load(flags.model_path, map_location='cpu')
    network.eval()
    return ModelWrapper(network, tfms)
def recognize_image(image_data):
    """Run recognition on a binary image payload.

    Args:
        image_data: binary image
    Returns:
        JSON string with the predicted class label
    """
    img = request_file(flags, image_data)
    scores = model(img)
    best = np.argmax(scores)
    payload = {'geoletters': 'true',
               _PREDICTION_KEY: class_names[best]}
    return json.dumps(payload)
@app.route('/', methods=['GET', 'POST'])
def cnn_recognize():
    """Root endpoint: GET serves the upload page, POST recognizes an image.

    Returns:
        resp - rendered index.html for GET, recognition JSON for POST
    """
    if request.method == 'GET':
        resp = render_template('index.html')
    elif request.method == 'POST':
        resp = recognize_image(request.data)
    return resp
@app.route('/upload', methods=['GET', 'POST'])
def cnn_upload():
    """Recognizes uploaded images
    Returns:
        resp - recognition response
    """
    # NOTE(review): GET requests also reach recognize_image with an empty
    # body — confirm the route really needs the 'GET' method.
    return recognize_image(request.data)
def read_labels(flags):
    """Read the labels JSON file referenced by the configuration.

    Args:
        flags - configuration parameters with a `label_path` attribute
    Returns:
        model_labels - labels dictionary, empty when no file is configured
    """
    labels_file = flags.label_path
    if labels_file is not None:
        with open(labels_file, 'r') as fp:
            model_labels = json.load(fp)
        # Lazy %-formatting fix: the original passed the dict as a stray
        # positional argument with no placeholder in the format string.
        logger.debug('model_labels - %s', model_labels)
    else:
        model_labels = {}
    return model_labels
def load_labels(flags):
    """Load the labels JSON and build an int-index -> class-name mapping.

    Args:
        flags - configuration parameters
    Returns:
        (labels_json, class_names) - raw JSON dict and int-keyed class map
    """
    labels_json = read_labels(flags)
    class_names = {int(index): label for index, label in labels_json.items()}
    logger.debug(class_names)
    return labels_json, class_names
if __name__ == "__main__":
    # Parse configuration, load the model and label map, then serve HTTP.
    flags = configure()
    logging.basicConfig(
        level=logging.DEBUG if flags.verbose else logging.INFO)
    model = init_wrapper()
    _, class_names = load_labels(flags)
    # Keep the configured num_classes when no labels file was provided.
    flags.num_classes = len(class_names) if len(
        class_names) > 0 else flags.num_classes
    app.run(host=flags.host, port=flags.port, threaded=True)
| [
"levantsinadze@gmail.com"
] | levantsinadze@gmail.com |
907cf454e454edb4d7c50fb9a5aaab80dda21d54 | c3c7398ec14865ea34c7f03aa5e012ddb19f0d5b | /app/forms.py | 63ce71ce50793d33f24d2f80063d2d67c44f9eda | [] | no_license | mzm5466/blog | 0e022f0ce85a0079cb72ffd9f472c7684f94d9fb | 13625fe7028a0df11a30d7de32751e34d681de00 | refs/heads/master | 2021-01-23T16:51:58.296591 | 2018-11-17T06:05:50 | 2018-11-17T06:05:50 | 102,748,039 | 0 | 0 | null | 2018-11-12T23:28:57 | 2017-09-07T14:36:32 | JavaScript | UTF-8 | Python | false | false | 195 | py | #!/usr/bin/python
#-*- coding:utf-8 -*-
from django.forms import ModelForm
from app.models import Moment
# ModelForm bound to the Moment model.
class MomentForm(ModelForm):
    class Meta:
        model=Moment
fields='__all__' | [
"you@example.com"
] | you@example.com |
b730e53bfea1689a32bc9d152957504af28beb0c | e24a007cba8cc63dbc29699e2651fbf27b3e7644 | /알고리즘4일차_0206/당근.py | 259c6bd059c2a66f2002ab4aa9f53f073c8bdd9a | [] | no_license | doyeon-kim-93/algorithm | 14df56481a727651a772cbaed7c7dec90fe38b14 | a706e55a6c5933f4901de5832cb0066cdb4665c3 | refs/heads/master | 2023-04-26T17:40:39.598842 | 2021-05-18T14:27:56 | 2021-05-18T14:27:56 | 241,492,095 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 420 | py | T = int(input())
for tc in range(1, T + 1):
    N = int(input())
    carrot = list(map(int, input().split()))
    # Prefix-sum rewrite: the original recomputed both side sums from
    # scratch for every split point, costing O(N^2); one running left sum
    # plus the precomputed total gives the same |left - right| values
    # in O(N).
    total = sum(carrot)
    left = 0
    result = []
    for i in range(N - 1):
        left += carrot[i]
        result.append(abs(left - (total - left)))
    result2 = min(result)
    idx = result.index(result2)
print(idx+1, result2) | [
"kdymay93@gmail.com"
] | kdymay93@gmail.com |
761730fd57eab2fb14f011bc79f7b6a26af7c9d4 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_exercises/templates/_algorithms_challenges/leetcode/leetCode/DP/SubarrayProductLessThanK.py | f0441c2ed62e7ca890c7f6b1bd76bfb2d20a4f91 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 1,645 | py | """
Your are given an array of positive integers nums.
Count and print the number of (contiguous) subarrays where the product of all the elements in the subarray is less than k.
Example 1:
Input: nums = [10, 5, 2, 6], k = 100
Output: 8
Explanation: The 8 subarrays that have product less than 100 are: [10], [5], [2], [6], [10, 5], [5, 2], [2, 6], [5, 2, 6].
Note that [10, 5, 2] is not included as the product of 100 is not strictly less than k.
Note:
0 < nums.length <= 50000.
0 < nums[i] < 1000.
0 <= k < 10^6.
思路 Dp,处理下 1 即可。 不考虑 0,nums[i] 不会为 0。
beat 19%
测试地址:
https://leetcode.com/problems/subarray-product-less-than-k/description/
可剪枝优化。
"""
c.. Solution o..
___ numSubarrayProductLessThanK nums, k
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
dp # list
result = 0
start = 0
___ i __ r..(l..(nums)):
__ nums[i] < k:
result += 1
dp = [nums[i]]
start = i
______
___ i __ r..(start+1, l..(nums)):
__ nums[i] __ 1 a.. nums[i] < k:
dp.a.. 1)
result += l..(dp)
c_
new # list
__ nums[i] < k:
result += 1
new.a.. nums[i])
___ j __ dp:
__ j * nums[i] < k:
result += 1
new.a.. j * nums[i])
dp = new
r_ result
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
6c4889f32d0f490fc3cecc4bc39dd6ac40054ac6 | 7b2dc269c3766deadb13415284d9848409d850c5 | /tests/test_load_arc.py | 86bb68bebcee5e99f44c5ef43586c441676d39cc | [] | no_license | Joaggi/demande | 8c3f32125cdf6377c9bd8a5b33bf162f8a5ec5cc | 289b8237d8e872e067dd4f6ab2297affe3903f4e | refs/heads/main | 2023-04-18T20:47:21.769183 | 2023-03-08T21:36:08 | 2023-03-08T21:36:08 | 611,455,062 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | import pytest
from neuraldensityestimation.load_arc import load_arc
import matplotlib.pylab as plt
def test_load_arc():
    """Smoke-test load_arc and save a scatter plot of the test split.

    Draws the 2-D test samples colored by their density values and writes
    the figure to reports/arc_dataset.png.
    """
    X_train, X_train_density, X_test, X_test_density = load_arc(1000, 1000, 2)
    plt.axes(frameon = 0)
    plt.grid()
    # Tiny, translucent markers keep 1000 overlapping points readable.
    plt.scatter(X_test[:,0], X_test[:,1], c = X_test_density , alpha = .2, s = 3, linewidths= 0.0000001)
    plt.colorbar()
    plt.title('arc dataset')
    plt.savefig('reports/arc_dataset.png',dpi = 300)
    plt.show()
| [
"joaggi@gmail.com"
] | joaggi@gmail.com |
ad2247d3109cfc819cd987f6f07106d9d8927c6e | d68cb993f5011ac2f6fe6be298a14ba370d4a661 | /cleanrl/experiments/docker/aws/setup.py | 2cc53b8fb7517ca8775cc0a3d909a4b6c795e725 | [
"MIT"
] | permissive | lydia99992/cleanrl | b6cb196a11730e89068a179d27ec99ccc85e9be1 | 418bfc01fe69712c5b617d49d810a1df7f4f0c14 | refs/heads/master | 2022-10-21T08:30:00.561062 | 2020-06-15T18:18:16 | 2020-06-15T18:18:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,267 | py | # pip install boto3
import boto3
import re
client = boto3.client('batch')
# Create the AWS Batch job queue bound to the 'cleanrl' compute environment.
print("creating job queue")
response = client.create_job_queue(
    jobQueueName='cleanrl',
    state='ENABLED',
    priority=100,
    computeEnvironmentOrder=[
        {
            'order': 100,
            'computeEnvironment': 'cleanrl'
        }
    ]
)
print(response)
print("job queue created \n=============================")
# On-demand job queue (left disabled in the original script):
# print("creating on demand job queue")
# response = client.create_job_queue(
#     jobQueueName='cleanrl_ondemand',
#     state='ENABLED',
#     priority=101,
#     computeEnvironmentOrder=[
#         {
#             'order': 100,
#             'computeEnvironment': 'cleanrl_ondemand'
#         }
#     ]
# )
# print(response)
# print("on demand job queue created \n=============================")
# Register the container job definition (3 retries, 30-minute timeout).
print("creating job definition")
response = client.register_job_definition(
    jobDefinitionName='cleanrl',
    type='container',
    containerProperties={
        'image': 'vwxyzjn/cleanrl_shared_memory:latest',
        'vcpus': 1,
        'memory': 1000,
    },
    retryStrategy={
        'attempts': 3
    },
    timeout={
        'attemptDurationSeconds': 1800
    }
)
print(response)
print("job definition created \n=============================")
| [
"costa.huang@outlook.com"
] | costa.huang@outlook.com |
8f44127439c751b32545cc6501eb7ad41de5abf3 | 81407be1385564308db7193634a2bb050b4f822e | /the-python-standard-library-by-example/string/string_template_missing.py | 35e360476f9a07060b26218ba1e06bb57963651a | [
"MIT"
] | permissive | gottaegbert/penter | 6db4f7d82c143af1209b4259ba32145aba7d6bd3 | 8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d | refs/heads/master | 2022-12-30T14:51:45.132819 | 2020-10-09T05:33:23 | 2020-10-09T05:33:23 | 305,266,398 | 0 | 0 | MIT | 2020-10-19T04:56:02 | 2020-10-19T04:53:05 | null | UTF-8 | Python | false | false | 300 | py | # -*- coding: utf-8 -*-
import string
values = {'var': 'foo'}
template = string.Template("$var is here but $missing is not provided")

# substitute() raises KeyError on the missing key, while safe_substitute()
# leaves the unmatched placeholder in place.
try:
    print('substitute() :', template.substitute(values))
except KeyError as err:
    print('ERROR:', str(err))

print('safe_substitute():', template.safe_substitute(values))
| [
"350840291@qq.com"
] | 350840291@qq.com |
e08743e4fc371d5d885083bc88c8b5d9c32be2b2 | e916a80eba284b399f9bff3a1f4c676502946059 | /binary_tree_diameter.py | c12f5b5818703dae60f54443b7d824dbf603b0a7 | [] | no_license | valmsmith39a/u-data-structures-algorithms | 109e7d9345bbf19bfd5896bb72afb0020f67c39f | 26c2ce76f46fe43f8ea40314b69b41784c461c40 | refs/heads/master | 2020-12-12T20:25:18.490231 | 2020-05-24T23:22:50 | 2020-05-24T23:22:50 | 234,222,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,234 | py |
from queue import Queue
def diameter_of_binary_tree(root):
    # The helper returns (height, diameter); expose only the diameter.
    return diameter_of_binary_tree_func(root)[1]
def diameter_of_binary_tree_func(root):
    """Return (height, diameter) of the subtree rooted at `root`."""
    if root is None:
        return 0, 0
    lh, ld = diameter_of_binary_tree_func(root.left)
    rh, rd = diameter_of_binary_tree_func(root.right)
    height = 1 + max(lh, rh)
    through_root = lh + rh
    return height, max(ld, rd, through_root)
class BinaryTreeNode:
    # Minimal binary-tree node: `data` payload plus left/right child links.
    def __init__(self, data):
        self.left = None
        self.right = None
        self.data = data
def convert_arr_to_binary_tree(arr):
    """
    Takes arr representing level-order traversal of Binary Tree;
    None marks a missing child. Returns the root node, or None for an
    empty/sentinel input.
    """
    if len(arr) <= 0 or arr[0] == -1:
        return None

    root = BinaryTreeNode(arr[0])
    cursor = 1
    pending = Queue()
    pending.put(root)

    while not pending.empty():
        node = pending.get()
        # Consume the next two array entries as this node's children.
        for side in ('left', 'right'):
            child_value = arr[cursor]
            cursor += 1
            if child_value is not None:
                child = BinaryTreeNode(child_value)
                setattr(node, side, child)
                pending.put(child)
    return root
def test_function(test_case):
    """Run one fixture: build the tree, compute the diameter, report result."""
    arr, solution = test_case[0], test_case[1]
    root = convert_arr_to_binary_tree(arr)
    output = diameter_of_binary_tree(root)
    print(output)
    print("Pass" if output == solution else "Fail")
# Three fixture trees (level-order encoding, None = absent child) paired
# with their expected diameters; test_function prints Pass/Fail for each.
arr = [1, 2, 3, 4, 5, None, None, None, None, None, None]
solution = 3
test_case = [arr, solution]
test_function(test_case)
arr = [1, 2, 3, 4, None, 5, None, None, None, None, None]
solution = 4
test_case = [arr, solution]
test_function(test_case)
arr = [1, 2, 3, None, None, 4, 5, 6, None, 7, 8, 9, 10,
       None, None, None, None, None, None, 11, None, None, None]
solution = 6
test_case = [arr, solution]
test_function(test_case)
| [
"valmsmith39a@gmail.com"
] | valmsmith39a@gmail.com |
e86658d7feac073ee6a8dd2fa49c068b8e6e1086 | a617b546d29b144b6e951cefbfa41a72e9b38ddc | /data/add_stop.py | ccb700eca2a71c9224b5c36950d4d52886071166 | [] | no_license | thangbk2209/natural_language_understanding | 5a5840662b2deb3361a44f83861b75d157d7f587 | 62f59c733996dd75c532d103f2dd1167d9a59c55 | refs/heads/master | 2020-03-25T22:26:47.462682 | 2018-10-18T02:31:29 | 2018-10-18T02:31:29 | 144,223,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 453 | py | from nltk.tokenize import sent_tokenize, word_tokenize
# Copy each corpus line to the output file, appending a sentence-final ' .'
# to every line that is not a question ('?' lines are copied unchanged).
# Fix: the output handle was opened without ever being closed; both files
# are now managed by a `with` block (output first, preserving the original
# truncate-then-read order).
with open('text_classifier_ver7_fix.txt', 'w', encoding="utf8") as data_file, \
        open('text_classifier_ver7.txt', encoding='utf-8') as corpus_file:
    for line in corpus_file:
        # token_file.write(words[j] + '\t' + 'O' + '\n')
        if '?' in line:
            data_file.write(line)
        else:
            data_file.write(line.rstrip('\n') + ' .' + '\n')
"thangbk2209@gmail.com"
] | thangbk2209@gmail.com |
9b2da78b192e59efaa38b4e27b1e24f0b2594f54 | caaf1b0754db1e676c37a6f1e58f19183754e654 | /sdk/network/azure-mgmt-network/generated_samples/configuration_policy_group_put.py | ab6749bf9e7cf800fcf920523b217ca684a8f8cf | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | rdomenzain/azure-sdk-for-python | 45dfb39121a0abda048c22e7309733a56259f525 | 58984255aeb904346b6958c5ba742749a2cc7d1b | refs/heads/master | 2023-07-07T06:53:12.967120 | 2023-07-04T16:27:37 | 2023-07-04T16:27:37 | 258,050,134 | 0 | 0 | MIT | 2020-04-23T00:12:14 | 2020-04-23T00:12:13 | null | UTF-8 | Python | false | false | 2,111 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.network import NetworkManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-network
# USAGE
python configuration_policy_group_put.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    # Authenticate via environment-based Azure AD credentials (see header).
    client = NetworkManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="subid",
    )

    # Create/update a VPN server configuration policy group and block until
    # the long-running operation completes.
    response = client.configuration_policy_groups.begin_create_or_update(
        resource_group_name="rg1",
        vpn_server_configuration_name="vpnServerConfiguration1",
        configuration_policy_group_name="policyGroup1",
        vpn_server_configuration_policy_group_parameters={
            "properties": {
                "isDefault": True,
                "policyMembers": [
                    {"attributeType": "RadiusAzureGroupId", "attributeValue": "6ad1bd08", "name": "policy1"},
                    {"attributeType": "CertificateGroupId", "attributeValue": "red.com", "name": "policy2"},
                ],
                "priority": 0,
            }
        },
    ).result()
    print(response)
# x-ms-original-file: specification/network/resource-manager/Microsoft.Network/stable/2022-11-01/examples/ConfigurationPolicyGroupPut.json
if __name__ == "__main__":
    main()  # Run the sample only when executed as a script.
| [
"noreply@github.com"
] | rdomenzain.noreply@github.com |
a7d07d20476888591eaba5d53db73c0733db2002 | 0a2cc497665f2a14460577f129405f6e4f793791 | /sdk/keyvault/azure-keyvault-administration/azure/keyvault/administration/_internal/__init__.py | dbc467573af1172194c53285d70e835a00dbab30 | [
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] | permissive | hivyas/azure-sdk-for-python | 112158aa9e1dd6e30cf6b3dde19f5db6ea2a577b | 8b3258fa45f5dc25236c22ad950e48aa4e1c181c | refs/heads/master | 2023-06-17T12:01:26.392186 | 2021-05-18T19:56:01 | 2021-05-18T19:56:01 | 313,761,277 | 1 | 1 | MIT | 2020-12-02T17:48:22 | 2020-11-17T22:42:00 | Python | UTF-8 | Python | false | false | 2,987 | py | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from collections import namedtuple
from six.moves.urllib_parse import urlparse
from .challenge_auth_policy import ChallengeAuthPolicy, ChallengeAuthPolicyBase
from .client_base import KeyVaultClientBase
from .http_challenge import HttpChallenge
from . import http_challenge_cache as HttpChallengeCache
# Public names re-exported by this internal helper package; the async variants
# are appended at the bottom of the module when they import successfully.
__all__ = [
    "ChallengeAuthPolicy",
    "ChallengeAuthPolicyBase",
    "HttpChallenge",
    "HttpChallengeCache",
    "KeyVaultClientBase",
]
_VaultId = namedtuple("VaultId", ["vault_url", "collection", "name", "version"])


def parse_vault_id(url):
    """Split a Key Vault object URL into (vault_url, collection, name, version).

    Example: "https://myvault.vault.azure.net/keys/mykey/abc123" parses to
    VaultId(vault_url="https://myvault.vault.azure.net", collection="keys",
    name="mykey", version="abc123"); ``version`` is None when the URL has
    only two path segments.

    :raises ValueError: if *url* is not an absolute URL with two or three
        path segments.
    """
    try:
        parsed_uri = urlparse(url)
    except Exception:  # pylint: disable=broad-except
        # fixed typo: message used to read "is not not a valid url"
        raise ValueError("'{}' is not a valid url".format(url))
    if not (parsed_uri.scheme and parsed_uri.hostname):
        raise ValueError("'{}' is not a valid url".format(url))

    # drop empty segments produced by leading/trailing slashes
    path = list(filter(None, parsed_uri.path.split("/")))

    if len(path) < 2 or len(path) > 3:
        raise ValueError("'{}' is not a valid vault url".format(url))

    return _VaultId(
        vault_url="{}://{}".format(parsed_uri.scheme, parsed_uri.hostname),
        collection=path[0],
        name=path[1],
        version=path[2] if len(path) == 3 else None,
    )
BackupLocation = namedtuple("BackupLocation", ["container_url", "folder_name"])


def parse_folder_url(folder_url):
    # type: (str) -> BackupLocation
    """Split a backup's blob storage URL into container URL and folder name.

    For example, https://<account>.blob.core.windows.net/backup/mhsm-account-2020090117323313 parses to
    (container_url="https://<account>.blob.core.windows.net/backup", folder_name="mhsm-account-2020090117323313").
    """
    try:
        parsed = urlparse(folder_url)
        # the first path segment names the container; everything after the
        # first "/" is the folder name (empty when there is no folder)
        relative_path = parsed.path.strip("/")
        container, _, folder_name = relative_path.partition("/")
        # any SAS token in the URL is deliberately dropped; the methods that
        # consume this location take the SAS token as a separate parameter
        container_url = "{}://{}/{}".format(parsed.scheme, parsed.netloc, container)
        return BackupLocation(container_url, folder_name)
    except:  # pylint:disable=broad-except
        raise ValueError(
            '"folder_url" should be the URL of a blob holding a Key Vault backup, for example '
            '"https://<account>.blob.core.windows.net/backup/mhsm-account-2020090117323313"'
        )
# Optionally export the async variants: in environments where the async
# modules cannot be imported (old interpreter -> SyntaxError, missing
# dependency -> ImportError) they are silently left out of __all__.
try:
    # pylint:disable=unused-import
    from .async_challenge_auth_policy import AsyncChallengeAuthPolicy
    from .async_client_base import AsyncKeyVaultClientBase
    __all__.extend(["AsyncChallengeAuthPolicy", "AsyncKeyVaultClientBase"])
except (SyntaxError, ImportError):
    pass
| [
"noreply@github.com"
] | hivyas.noreply@github.com |
421356549a8bf9d1cd5079fe809a2e1e3314f3ec | 4626631c5e68a13ed4dde041212da39d344d74d9 | /examples/scripts/get-managed-sans.py | 561948b1d35765367c528692004cd57cd6306cb1 | [
"MIT"
] | permissive | xod442/python-hpOneView | a1482677e3252dabf1e14f9349c119428331089f | b78fb81cba34992bb84ed3814aae04ce05ef913f | refs/heads/master | 2021-01-18T05:53:42.466348 | 2015-08-11T15:59:16 | 2015-08-11T15:59:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,389 | py | #!/usr/bin/env python3
###
# (C) Copyright (2012-2015) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
import sys
import re
# Fail fast on interpreters older than Python 3.4
if sys.version_info < (3, 4):
    raise Exception('Must use Python 3.4 or later')
import hpOneView as hpov
from pprint import pprint
def acceptEULA(con):
    """Accept the appliance EULA if it has not been accepted yet.

    Queries the EULA status once and, when acceptance is required, declines
    the display prompt via set_eula('no').  Failures are reported on stdout.

    Fix: the status used to be queried twice — once outside the try block
    with the result discarded — so a failing appliance call both wasted a
    round-trip and escaped the exception handler.
    """
    # See if we need to accept the EULA before we try to log in
    try:
        if con.get_eula_status() is True:
            print('EULA display needed')
            con.set_eula('no')
    except Exception as e:
        print('EXCEPTION:')
        print(e)
def login(con, credential):
    """Log in to the appliance with the given credential dict.

    A failed login is reported on stdout rather than raised, preserving the
    original CLI behaviour.  Fix: only Exception is caught now — the previous
    bare ``except:`` also swallowed KeyboardInterrupt and SystemExit.
    """
    # Login with given credentials
    try:
        con.login(credential)
    except Exception:
        print('Login failed')
def get_managed_sans(fcs):
    """Fetch the Managed SAN resources from the appliance and pretty-print them."""
    pprint(fcs.get_managed_sans())
def main():
    """Parse command-line options, connect and authenticate to the HP OneView
    appliance, then display its Managed SAN resources."""
    parser = argparse.ArgumentParser(add_help=True,
                        formatter_class=argparse.RawTextHelpFormatter,
                        description='''
    Display or list the available Managed SAN resources in the appliance
    Usage: ''')
    parser.add_argument('-a', dest='host', required=True,
                        help='''
    HP OneView Appliance hostname or IP address''')
    parser.add_argument('-u', dest='user', required=False,
                        default='Administrator',
                        help='''
    HP OneView Username''')
    parser.add_argument('-p', dest='passwd', required=True,
                        help='''
    HP OneView Password''')
    parser.add_argument('-c', dest='cert', required=False,
                        help='''
    Trusted SSL Certificate Bundle in PEM (Base64 Encoded DER) Format''')
    parser.add_argument('-y', dest='proxy', required=False,
                        help='''
    Proxy (host:port format''')
    args = parser.parse_args()
    credential = {'userName': args.user, 'password': args.passwd}
    con = hpov.connection(args.host)
    fcs = hpov.fcsans(con)
    # Optional transport settings must be applied before logging in
    if args.proxy:
        con.set_proxy(args.proxy.split(':')[0], args.proxy.split(':')[1])
    if args.cert:
        con.set_trusted_ssl_bundle(args.cert)
    login(con, credential)
    acceptEULA(con)
    get_managed_sans(fcs)
if __name__ == '__main__':
    # NOTE(review): 'sys' is already imported at module scope, and 'argparse'
    # (used inside main) is only imported here — this works because main()
    # runs after these imports, but hoisting argparse to the top of the file
    # would be cleaner.
    import sys
    import argparse
    sys.exit(main())
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
| [
"troy@debdev.org"
] | troy@debdev.org |
fc81cd518d3585626eb7564962acc4f1ac8cb8b3 | 4a216ef92a3acca38e8705a67642f1bf2037b571 | /benAnadolu_con/makale/migrations/0005_auto_20210813_2255.py | 57daf3967650a65a2b7d519ab893b8b5cdbc4fd3 | [] | no_license | US3B3/Django-Books-Template | be2bfe53bfc01633a1e087d5852e76720905e406 | 82f4923174d36ffd3f34728c318f1e5ac74973da | refs/heads/main | 2023-08-15T04:57:06.742671 | 2021-09-19T02:58:17 | 2021-09-19T02:58:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 654 | py | # Generated by Django 3.2.3 on 2021-08-13 22:55
import ckeditor_uploader.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration for the 'makale' app: adds to 'kategori'
    an optional rich-text description ('aciklama') and an image field
    ('resim') with a default placeholder image."""
    dependencies = [
        ('makale', '0004_auto_20210811_1600'),
    ]
    operations = [
        migrations.AddField(
            model_name='kategori',
            name='aciklama',
            field=ckeditor_uploader.fields.RichTextUploadingField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='kategori',
            name='resim',
            field=models.ImageField(default='varsayilan.jpg', upload_to='kategori/%Y/%m/%d/'),
        ),
    ]
| [
"="
] | = |
8f54c3fa3599e855b9119cf3cb6e475466c83ce9 | 50d331aec35c1429e0d9b68822623ee9a45b251f | /IPTVPlayer/iptvdm/busyboxdownloader.py | cb2ed44c023f836678bf24c6d483ddaac26cacb0 | [] | no_license | openmb/iptvplayer | cd00c693adcac426214cc45d7ae5c97b9d7cbe91 | bbc3f5b6f445f83639cd1ebb5992dc737bc9023d | refs/heads/master | 2021-01-17T09:58:09.202306 | 2017-03-26T18:19:10 | 2017-03-26T18:19:10 | 83,997,131 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,272 | py | # -*- coding: utf-8 -*-
#
# IPTV download manager API
#
# $Id$
#
#
###################################################
# LOCAL import
###################################################
from Plugins.Extensions.IPTVPlayer.tools.iptvtools import printDBG, printExc, iptv_system, eConnectCallback, E2PrioFix
from Plugins.Extensions.IPTVPlayer.iptvdm.basedownloader import BaseDownloader
from Plugins.Extensions.IPTVPlayer.iptvdm.wgetdownloader import WgetDownloader
from Plugins.Extensions.IPTVPlayer.iptvdm.iptvdh import DMHelper
###################################################
###################################################
# FOREIGN import
###################################################
from Tools.BoundFunction import boundFunction
from enigma import eConsoleAppContainer
###################################################
###################################################
# One instance of this class can be used only for
# one download
###################################################
class BuxyboxWgetDownloader(WgetDownloader):
    """Downloader backend driving the busybox 'wget' applet through an
    enigma eConsoleAppContainer.  One instance handles exactly one download
    (see the module comment above)."""
    def __init__(self):
        printDBG('BuxyboxWgetDownloader.__init__ ----------------------------------')
        WgetDownloader.__init__(self)
        # handle of the background probe started by isWorkingCorrectly()
        self.iptv_sys = None
    def __del__(self):
        printDBG("BuxyboxWgetDownloader.__del__ ----------------------------------")
    def getName(self):
        """Human-readable backend name."""
        return "busybox wget"
    def isWorkingCorrectly(self, callBackFun):
        """Asynchronously probe for a usable 'wget' binary; the result is
        delivered via callBackFun(sts, reason)."""
        self.iptv_sys = iptv_system( "wget 2>&1 ", boundFunction(self._checkWorkingCallBack, callBackFun) )
    def _checkWorkingCallBack(self, callBackFun, code, data):
        # wget run without arguments prints its usage text; anything else
        # means the binary is missing or broken, and its output is the reason
        reason = ''
        sts = True
        if 'Usage: wget' not in data:
            sts = False
            reason = data
        self.iptv_sys = None
        callBackFun(sts, reason)
    def start(self, url, filePath, params = {}, info_from=None, retries=0):
        '''
        Override of BaseDownloader.start: spawn busybox wget to fetch *url*
        into *filePath*, then report through the onStart/onFinish callbacks.

        NOTE(review): *params* is stored but otherwise unused here, *retries*
        is ignored, and the mutable default ``params = {}`` is shared across
        calls — kept as-is for interface compatibility.
        '''
        self.url = url
        self.filePath = filePath
        self.downloaderParams = params
        self.fileExtension = '' # should be implemented in future
        self.outData = ''
        self.contentType = 'unknown'
        if None == info_from:
            info_from = WgetDownloader.INFO.FROM_FILE
        self.infoFrom = info_from
        cmd = 'wget ' + '"' + self.url + '" -O "' + self.filePath + '" > /dev/null'
        printDBG("Download cmd[%s]" % cmd)
        self.console = eConsoleAppContainer()
        self.console_appClosed_conn = eConnectCallback(self.console.appClosed, self._cmdFinished)
        self.console.execute( E2PrioFix( cmd ) )
        self.wgetStatus = self.WGET_STS.CONNECTING
        self.status = DMHelper.STS.DOWNLOADING
        self.onStart()
        return BaseDownloader.CODE_OK
    def _terminate(self):
        """Abort a running download.  Returns CODE_OK when a download was
        stopped, CODE_NOT_DOWNLOADING otherwise."""
        printDBG("BuxyboxWgetDownloader._terminate")
        if None != self.iptv_sys:
            self.iptv_sys.kill()
            self.iptv_sys = None
        if DMHelper.STS.DOWNLOADING == self.status:
            if self.console:
                self.console.sendCtrlC() # kill # produce zombies
            self._cmdFinished(-1, True)
            return BaseDownloader.CODE_OK
        return BaseDownloader.CODE_NOT_DOWNLOADING
    def _cmdFinished(self, code, terminated=False):
        """Console-exit handler: classifies the outcome into DOWNLOADED /
        INTERRUPTED / ERROR by comparing local and remote file sizes."""
        printDBG("BuxyboxWgetDownloader._cmdFinished code[%r] terminated[%r]" % (code, terminated))
        # break circular references
        self.console_appClosed_conn = None
        self.console = None
        self.wgetStatus = self.WGET_STS.ENDED
        # When finished updateStatistic based on file sie on disk
        BaseDownloader.updateStatistic(self)
        if terminated:
            self.status = DMHelper.STS.INTERRUPTED
        elif 0 >= self.localFileSize:
            self.status = DMHelper.STS.ERROR
        elif self.remoteFileSize > 0 and self.remoteFileSize > self.localFileSize:
            self.status = DMHelper.STS.INTERRUPTED
        else:
            self.status = DMHelper.STS.DOWNLOADED
        if not terminated:
            self.onFinish()
| [
"samsamsam@o2.pl"
] | samsamsam@o2.pl |
ecaf3bcf4fede2cf5d43624547555f1737b0269c | a41e1498e3c080f47abd8e8e57157548df3ebbf1 | /pandas/tests/indexes/ranges/test_join.py | 682b5c8def9ff0e00b533610c1d45a093e7d7a8d | [
"BSD-3-Clause"
] | permissive | pandas-dev/pandas | e7e639454a298bebc272622e66faa9829ea393bb | c7325d7e7e77ecb4a4e57b48bc25265277c75712 | refs/heads/main | 2023-09-01T12:42:07.927176 | 2023-09-01T11:14:10 | 2023-09-01T11:14:10 | 858,127 | 36,166 | 18,728 | BSD-3-Clause | 2023-09-14T21:18:41 | 2010-08-24T01:37:33 | Python | UTF-8 | Python | false | false | 6,268 | py | import numpy as np
from pandas import (
Index,
RangeIndex,
)
import pandas._testing as tm
class TestJoin:
    """Join behaviour of RangeIndex against Index[int64], RangeIndex and
    object-dtype indexes for the four join types, checking the joined index,
    its concrete type, and the left/right indexer arrays."""
    def test_join_outer(self):
        # join with Index[int64]
        index = RangeIndex(start=0, stop=20, step=2)
        other = Index(np.arange(25, 14, -1, dtype=np.int64))
        res, lidx, ridx = index.join(other, how="outer", return_indexers=True)
        noidx_res = index.join(other, how="outer")
        tm.assert_index_equal(res, noidx_res)
        eres = Index(
            [0, 2, 4, 6, 8, 10, 12, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]
        )
        elidx = np.array(
            [0, 1, 2, 3, 4, 5, 6, 7, -1, 8, -1, 9, -1, -1, -1, -1, -1, -1, -1],
            dtype=np.intp,
        )
        eridx = np.array(
            [-1, -1, -1, -1, -1, -1, -1, -1, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0],
            dtype=np.intp,
        )
        # outer join of a RangeIndex with a non-range index decays to Index[int64]
        assert isinstance(res, Index) and res.dtype == np.dtype(np.int64)
        assert not isinstance(res, RangeIndex)
        tm.assert_index_equal(res, eres, exact=True)
        tm.assert_numpy_array_equal(lidx, elidx)
        tm.assert_numpy_array_equal(ridx, eridx)
        # join with RangeIndex
        other = RangeIndex(25, 14, -1)
        res, lidx, ridx = index.join(other, how="outer", return_indexers=True)
        noidx_res = index.join(other, how="outer")
        tm.assert_index_equal(res, noidx_res)
        assert isinstance(res, Index) and res.dtype == np.int64
        assert not isinstance(res, RangeIndex)
        tm.assert_index_equal(res, eres)
        tm.assert_numpy_array_equal(lidx, elidx)
        tm.assert_numpy_array_equal(ridx, eridx)
    def test_join_inner(self):
        # Join with non-RangeIndex
        index = RangeIndex(start=0, stop=20, step=2)
        other = Index(np.arange(25, 14, -1, dtype=np.int64))
        res, lidx, ridx = index.join(other, how="inner", return_indexers=True)
        # no guarantee of sortedness, so sort for comparison purposes
        ind = res.argsort()
        res = res.take(ind)
        lidx = lidx.take(ind)
        ridx = ridx.take(ind)
        eres = Index([16, 18])
        elidx = np.array([8, 9], dtype=np.intp)
        eridx = np.array([9, 7], dtype=np.intp)
        assert isinstance(res, Index) and res.dtype == np.int64
        tm.assert_index_equal(res, eres)
        tm.assert_numpy_array_equal(lidx, elidx)
        tm.assert_numpy_array_equal(ridx, eridx)
        # Join two RangeIndex
        other = RangeIndex(25, 14, -1)
        res, lidx, ridx = index.join(other, how="inner", return_indexers=True)
        # inner join of two RangeIndex stays a RangeIndex
        assert isinstance(res, RangeIndex)
        tm.assert_index_equal(res, eres, exact="equiv")
        tm.assert_numpy_array_equal(lidx, elidx)
        tm.assert_numpy_array_equal(ridx, eridx)
    def test_join_left(self):
        # Join with Index[int64]
        index = RangeIndex(start=0, stop=20, step=2)
        other = Index(np.arange(25, 14, -1, dtype=np.int64))
        res, lidx, ridx = index.join(other, how="left", return_indexers=True)
        eres = index
        eridx = np.array([-1, -1, -1, -1, -1, -1, -1, -1, 9, 7], dtype=np.intp)
        # left join keeps the calling RangeIndex; lidx is None (identity)
        assert isinstance(res, RangeIndex)
        tm.assert_index_equal(res, eres)
        assert lidx is None
        tm.assert_numpy_array_equal(ridx, eridx)
        # Join withRangeIndex
        other = Index(np.arange(25, 14, -1, dtype=np.int64))
        res, lidx, ridx = index.join(other, how="left", return_indexers=True)
        assert isinstance(res, RangeIndex)
        tm.assert_index_equal(res, eres)
        assert lidx is None
        tm.assert_numpy_array_equal(ridx, eridx)
    def test_join_right(self):
        # Join with Index[int64]
        index = RangeIndex(start=0, stop=20, step=2)
        other = Index(np.arange(25, 14, -1, dtype=np.int64))
        res, lidx, ridx = index.join(other, how="right", return_indexers=True)
        eres = other
        elidx = np.array([-1, -1, -1, -1, -1, -1, -1, 9, -1, 8, -1], dtype=np.intp)
        # right join keeps 'other'; ridx is None (identity)
        assert isinstance(other, Index) and other.dtype == np.int64
        tm.assert_index_equal(res, eres)
        tm.assert_numpy_array_equal(lidx, elidx)
        assert ridx is None
        # Join withRangeIndex
        other = RangeIndex(25, 14, -1)
        res, lidx, ridx = index.join(other, how="right", return_indexers=True)
        eres = other
        assert isinstance(other, RangeIndex)
        tm.assert_index_equal(res, eres)
        tm.assert_numpy_array_equal(lidx, elidx)
        assert ridx is None
    def test_join_non_int_index(self):
        # joining with an object-dtype index upcasts the RangeIndex side
        index = RangeIndex(start=0, stop=20, step=2)
        other = Index([3, 6, 7, 8, 10], dtype=object)
        outer = index.join(other, how="outer")
        outer2 = other.join(index, how="outer")
        expected = Index([0, 2, 3, 4, 6, 7, 8, 10, 12, 14, 16, 18])
        tm.assert_index_equal(outer, outer2)
        tm.assert_index_equal(outer, expected)
        inner = index.join(other, how="inner")
        inner2 = other.join(index, how="inner")
        expected = Index([6, 8, 10])
        tm.assert_index_equal(inner, inner2)
        tm.assert_index_equal(inner, expected)
        left = index.join(other, how="left")
        tm.assert_index_equal(left, index.astype(object))
        left2 = other.join(index, how="left")
        tm.assert_index_equal(left2, other)
        right = index.join(other, how="right")
        tm.assert_index_equal(right, other)
        right2 = other.join(index, how="right")
        tm.assert_index_equal(right2, index.astype(object))
    def test_join_non_unique(self):
        # duplicated values in 'other' appear once per duplicate in the result
        index = RangeIndex(start=0, stop=20, step=2)
        other = Index([4, 4, 3, 3])
        res, lidx, ridx = index.join(other, return_indexers=True)
        eres = Index([0, 2, 4, 4, 6, 8, 10, 12, 14, 16, 18])
        elidx = np.array([0, 1, 2, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.intp)
        eridx = np.array([-1, -1, 0, 1, -1, -1, -1, -1, -1, -1, -1], dtype=np.intp)
        tm.assert_index_equal(res, eres)
        tm.assert_numpy_array_equal(lidx, elidx)
        tm.assert_numpy_array_equal(ridx, eridx)
    def test_join_self(self, join_type):
        # joining an index with itself is a no-op returning the same object
        index = RangeIndex(start=0, stop=20, step=2)
        joined = index.join(index, how=join_type)
        assert index is joined
| [
"noreply@github.com"
] | pandas-dev.noreply@github.com |
3d9d812cba98b8dd9aa337aba826cd5d44d24e30 | 813a8e7cc7dcd8d9b07e2c0c45184507d6760d59 | /materials/carbon_steel.py | 9117b2fbfa7dff3a8a33da71627cd76c8c20a677 | [
"MIT"
] | permissive | jultou-raa/materials | de6780e7a2a4ccdccb66a5835631105546a16428 | b5df21545c9fe0f115d9683c5b253b982c35e1ad | refs/heads/master | 2021-08-24T00:50:11.970866 | 2017-12-07T09:52:33 | 2017-12-07T09:52:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 546 | py | # -*- coding: utf-8 -*-
#
from .helpers import mu0
# [1] https://en.wikipedia.org/wiki/Carbon_steel
# [2]
# https://en.wikipedia.org/wiki/Permeability_(electromagnetism)#Values_for_some_common_materials
# [3] https://en.wikipedia.org/wiki/List_of_thermal_conductivities
# [4]
# https://en.wikipedia.org/wiki/Heat_capacity#Table_of_specific_heat_capacities
#
# [1]
# relative permeability ~100, expressed in absolute terms [H/m]
magnetic_permeability = 100*mu0
# mass density [kg/m^3]
density = 7.85e3
# [3]
# thermal conductivity [W/(m*K)]
thermal_conductivity = 50.0
# stainless steel @293K:
# electrical conductivity [S/m] -- NOTE(review): the source comment above
# says this value is for stainless steel, not carbon steel; verify.
electrical_conductivity = 1.180e6
# [4]
# specific heat capacity [J/(kg*K)]
specific_heat_capacity = 0.466e3
| [
"nico.schloemer@gmail.com"
] | nico.schloemer@gmail.com |
fbcd624cd08dbd69701cdefb2d86373655f136df | ae8254fdc04306e90df7e0359460e120498eabb5 | /src/Pipelines/TrackML_Example/LightningModules/GNN/Models/split_checkpoint_agnn.py | 33d51d08a7cdb427231670a4de5c2699a357cc7a | [] | no_license | vhewes/Tracking-ML-Exa.TrkX | 23a21578a5275b0fe112a30489e02f19e21c7bbe | b8e94a85fc7688acc649693c35069b8d2a8594e0 | refs/heads/master | 2023-03-07T07:12:24.250885 | 2021-02-19T01:09:33 | 2021-02-19T01:09:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,281 | py | import torch
import torch.nn.functional as F
import pytorch_lightning as pl
from .checkpoint_agnn import CheckpointedResAGNN
class SplitCheckpointedResAGNN(CheckpointedResAGNN):
    """Checkpointed residual AGNN trained on a *subset* of the graph edges:
    the loss is evaluated only on the edges selected by batch.nested_ind[0].

    Cleanup: removed a large block of dead, commented-out validation_step
    code (recover it from version control if needed).
    """

    def __init__(self, hparams):
        super().__init__(hparams)
        print("Initialised")

    def training_step(self, batch, batch_idx):
        """Compute BCE-with-logits loss on the nested edge subset.

        The positive-class weight comes from hparams["weight"] when present,
        otherwise from the negative/positive ratio of batch.y_pid.  In the
        "ci" regime, cell information is concatenated onto the node features
        before the forward pass; in the "pid" regime the target is
        particle-id agreement between the two edge endpoints instead of
        batch.y.
        """
        weight = (torch.tensor(self.hparams["weight"]) if ("weight" in self.hparams)
                      else torch.tensor((~batch.y_pid.bool()).sum() / batch.y_pid.sum()))
        output = (self(torch.cat([batch.cell_data, batch.x], axis=-1),
                       batch.edge_index).squeeze()
                  if ('ci' in self.hparams["regime"])
                  else self(batch.x, batch.edge_index).squeeze())
        if ('pid' in self.hparams["regime"]):
            y_pid = (batch.pid[batch.edge_index[0, batch.nested_ind[0]]] == batch.pid[batch.edge_index[1, batch.nested_ind[0]]]).float()
            loss = F.binary_cross_entropy_with_logits(output[batch.nested_ind[0]], y_pid.float(), pos_weight = weight)
        else:
            loss = F.binary_cross_entropy_with_logits(output[batch.nested_ind[0]], batch.y[batch.nested_ind[0]], pos_weight = weight)
        result = pl.TrainResult(minimize=loss)
        result.log('train_loss', loss, prog_bar=True)
        return result
# return result | [
"murnanedaniel@hotmail.com"
] | murnanedaniel@hotmail.com |
bef6d6e70e9196ed235a9bfec8ddb7e7233f9915 | 83624401467510aaf8e69328b0d9915e1cf0c5ce | /frites/conn/__init__.py | 3ee1a6f48de9ff32e79460c19a8809375484802e | [
"BSD-3-Clause"
] | permissive | MatthieuGilson/frites | a1f5c128b1f6b59eb7c1ba2a7740dea8eaddeb30 | 0e1b99f396d5b54b69f7a5cf962679f4afc0e776 | refs/heads/master | 2023-03-09T17:15:51.841205 | 2021-02-27T11:11:22 | 2021-02-27T11:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 978 | py | """Information-based connectivity metrics and utility functions.
This submodule contains two types of functions :
1. **Connectivity metrics :** methods to estimate either the undirected or
directed connectivity. Some methods are performed within-trials and others
across-trials. In the case of within-trials metrics, it is then possible to
estimate if the connectivity is modulated by the task by passing the
connectivity arrays to `frites.workflow.WfMi`
2. **Connectivity related utility functions :** small utility functions that
work on connectivity arrays
"""
# connectivity metrics
from .conn_covgc import conn_covgc # noqa
from .conn_dfc import conn_dfc # noqa
from .conn_transfer_entropy import conn_transfer_entropy # noqa
# connectivity utility functions
from .conn_sliding_windows import define_windows, plot_windows # noqa
from .conn_utils import (conn_get_pairs, conn_reshape_undirected, # noqa
conn_reshape_directed)
| [
"e.combrisson@gmail.com"
] | e.combrisson@gmail.com |
671dafe0c0d3faa4bd51fd3ab010fbe69d9911f0 | 19361e1df45f755c67da3ddc47b6e4ed2cfa079f | /faker/faker_data_generator.py | 6b82a6a0c68747bdb864015d715aed85f75a32ce | [] | no_license | WilliamQLiu/python-examples | 99fb538191196714ded91f9ac97dec107aa22440 | 731113038b6574e3ede72cc61921b0aee8e3bafa | refs/heads/master | 2023-04-27T02:19:22.626542 | 2022-10-02T16:44:41 | 2022-10-02T16:44:41 | 11,562,476 | 74 | 67 | null | 2023-04-15T13:36:17 | 2013-07-21T14:03:14 | Jupyter Notebook | UTF-8 | Python | false | false | 4,855 | py | """ Requires pip install fake-factory """
# pylint: disable=I0011,C0103,W0142,E1101,C0304
# http://docs.python.org/2/library/xml.etree.elementtree.html
# https://pypi.python.org/pypi/fake-factory
import xml.etree.ElementTree as ET
from faker import Factory
if __name__ == '__main__':
    # Build one fake <Call> record (caller, phone worker, timestamps, notes)
    # with the fake-factory library and write it to fakedata.xml; element and
    # attribute names mirror the sample XML in the string literal below.
    faker = Factory.create() # Create and instantiate a Faker generator
    # Setup Element Tree
    root = ET.Element("root") # root
    calls = ET.SubElement(root, "Calls") # Calls
    call = ET.SubElement(calls, "Call") # Call
    reportversion = ET.SubElement(call, "ReportVersion")
    calldateandtimestart = ET.SubElement(call, "CallDateAndTimeStart")
    calldateandtimeend = ET.SubElement(call, "CallDateAndTimeEnd")
    phoneworker = ET.SubElement(call, "PhoneWorker")
    pfirstname = ET.SubElement(phoneworker, "FirstName") # Phone Work First Name
    plastname = ET.SubElement(phoneworker, "LastName") # Phone Work Last Name
    caller = ET.SubElement(call, "Caller")
    callername = ET.SubElement(caller, "CallerName")
    cfirstname = ET.SubElement(callername, "FirstName") # Caller First Name
    cmiddlename = ET.SubElement(callername, "MiddleName") # Caller Middle Name
    clastname = ET.SubElement(callername, "LastName") # Caller Last Name
    callerlocation = ET.SubElement(caller, "CallerLocation")
    ccountry = ET.SubElement(callerlocation, "Country")
    cstateprovince = ET.SubElement(callerlocation, "StateProvince")
    ccounty = ET.SubElement(callerlocation, "County")
    ccity = ET.SubElement(callerlocation, "City") # NOTE(review): element created but never assigned text below
    cpostalcode = ET.SubElement(callerlocation, "PostalCode")
    caddress = ET.SubElement(callerlocation, "Address")
    callerphonenumber = ET.SubElement(caller, "CallerPhoneNumber")
    callnotes = ET.SubElement(call, "CallNotes")
    # Put in Fake values
    call.set("ID", str(faker.random_number(digits=9)))
    reportversion.set("ID", str(faker.random_number(digits=4)))
    reportversion.text = str(faker.random_element(\
        array=('H2H', 'DDH', 'OASAS')))
    calldateandtimestart.set("TimeZone", str(faker.timezone()))
    calldateandtimestart.text = str(faker.date_time_this_year())
    calldateandtimeend.set("TimeZone", str(faker.timezone()))
    calldateandtimeend.text = str(faker.date_time_this_year())
    phoneworker.set("ID", str(faker.random_number(digits=5)))
    pfirstname.text = str(faker.first_name()) # Phone Worker First Name
    plastname.text = str(faker.last_name()) # Phone Worker Last Name
    caller.set("ID", str(faker.random_number(digits=6)))
    cfirstname.text = str(faker.first_name()) # Caller First Name
    cmiddlename.text = str(faker.first_name()) # Caller Last Name
    clastname.text = str(faker.last_name()) # Caller Last Name
    ccountry.text = str(faker.country())
    cstateprovince.text = str(faker.state_abbr())
    ccounty.text = str(faker.city()) # Nothing for counties
    cpostalcode.text = str(faker.postcode())
    caddress.text = str(faker.street_address())
    callerphonenumber.text = str(faker.phone_number())
    callnotes.text = str(faker.paragraphs(nb=3))
    # Write entire tree to xml
    tree = ET.ElementTree(root)
    tree.write("fakedata.xml")
"""
<?xml version="1.0" encoding="utf-8"?>
<root>
<Calls>
<Call ID="15784825">
<ReportVersion ID="333">H2H</ReportVersion>
<CallDateAndTimeStart TimeZone="UTC-8">2013-10-01 00:44</CallDateAndTimeStart>
<CallDateAndTimeEnd TimeZone="UTC-8">2013-10-01 01:27</CallDateAndTimeEnd>
<CallLength>43</CallLength>
<PhoneWorker ID="30591">
<FirstName>Susan</FirstName>
<LastName>Stevens</LastName>
</PhoneWorker>
<Caller ID="989898">
<CallerName>
<FirstName>Bob</FirstName>
<MiddleName>Scott</MiddleName>
<LastName>Jones></LastName>
</CallerName>
<CallerLocation>
<Country>US</Country>
<StateProvince>CA</StateProvince>
<County>Alameda</County>
<City>Oakland</City>
<PostalCode>94444</PostalCode>
<Address>133 Elm Street</Address>
</CallerLocation>
<CallerPhoneNumber>510-555-1212</CallerPhoneNumber>
</Caller>
<CallNotes>This is my note! My notes can be very long.</CallNotes>
<CustomFields>
<Field ID="1234" FieldName="Gender">
<Value ID="9876">Male</Value>
</Field>
<Field ID="1235" FieldName="Age Group">
<Value ID="9875">25-29</Value>
</Field>
<Field ID="1236" FieldName="Mental Status Assessment - Functional">
<Value ID="9874">Sleep disturbance</Value>
<Value ID="9873">Fatigued</Value>
<Value ID="9872">Depressed</Value>
</Field>
</CustomFields>
</Call>
</Calls>
</root>
""" | [
"William.Q.Liu@gmail.com"
] | William.Q.Liu@gmail.com |
6b619b3c483cf421f5d62ed91e0103f4bd31ada4 | 492cfeab952ad8533f3fc3ca7b4267ec31cb8d30 | /myapp/celery.py | 8d6bb15014cecf1fa9dc2ceb5d6ef6316c4db4ea | [] | no_license | ohahlev/flask-module | b0ebadd32cd1937dffddf3c9e056eccac140e3a7 | 1ee139a789dd22007adafb0c77cc4595ebcc4c7e | refs/heads/master | 2020-09-07T10:39:43.605884 | 2019-11-10T07:14:58 | 2019-11-10T07:14:58 | 220,753,847 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | # myapp/celery.py
from flask_appfactory.celery import celeryfactory
from .app import create_app
# Module-level Celery instance bound to the Flask application built by
# create_app().  NOTE(review): presumably referenced by workers as
# "myapp.celery:celery" -- confirm against the deployment configuration.
celery = celeryfactory(create_app())
| [
"ohahlev@gmail.com"
] | ohahlev@gmail.com |
a290594538be0325021a941a6df3b199cd21a016 | 1b19103c7781c31b4042e5404eea46fa90014a70 | /cenit_google_proximity_beacon_api_v1beta1/models/config.py | b223ed193d9049c93cdfe58ede68332321064fba | [] | no_license | andhit-r/odoo-integrations | c209797d57320f9e49271967297d3a199bc82ff5 | dee7edc4e9cdcc92e2a8a3e9c34fac94921d32c0 | refs/heads/8.0 | 2021-01-12T05:52:26.101701 | 2016-12-22T03:06:52 | 2016-12-22T03:06:52 | 77,223,257 | 0 | 1 | null | 2016-12-23T12:11:08 | 2016-12-23T12:11:08 | null | UTF-8 | Python | false | false | 2,546 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010, 2014 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from openerp import models, fields
_logger = logging.getLogger(__name__)
# Identity of the Cenit collection installed by the settings model below.
COLLECTION_NAME = "google_proximity_beacon_api_v1beta1"
COLLECTION_VERSION = "0.1"
# Extra parameters for the installer; this collection needs none.
COLLECTION_PARAMS = {
    # WITHOUT COLLECTION_PARAMS.
}
class CenitIntegrationSettings(models.TransientModel):
    """Transient (wizard) settings model whose install() hook installs the
    Cenit collection for the Google Proximity Beacon API."""
    _name = "cenit.google_proximity_beacon_api_v1beta1.settings"
    _inherit = 'res.config.settings'
    ############################################################################
    # Pull Parameters
    ############################################################################
    # WITHOUT PULL PARAMETERS.
    ############################################################################
    # Default Getters
    ############################################################################
    # WITHOUT GETTERS.
    ############################################################################
    # Default Setters
    ############################################################################
    # WITHOUT SETTERS.
    ############################################################################
    # Actions
    ############################################################################
    def install(self, cr, uid, context=None):
        """Install the named Cenit collection (old-style Odoo cr/uid API).

        NOTE(review): the value returned by get_collection_data() is fetched
        into 'data' but never used afterwards — confirm whether the call has
        required side effects or can be dropped.
        """
        installer = self.pool.get('cenit.collection.installer')
        data = installer.get_collection_data(
            cr, uid,
            COLLECTION_NAME,
            version = COLLECTION_VERSION,
            context = context
        )
        installer.install_collection(cr, uid, {'name': COLLECTION_NAME})
| [
"sanchocuba@gmail.com"
] | sanchocuba@gmail.com |
8beb7d33ae0a84fb331b4b7dc956f2ce5f95bb1e | 6eb8fa32f3d2ccc2aa7196ed702d4cc35c66d597 | /Week_01/0001.py | 581f1e99a5ad39046d1f43428ca77e27ca3e39a1 | [] | no_license | mach8686devops/AlgorithmCHUNZHAO | a7490c684fc89504e9c2a633a18ea250262b6dcc | 497e833b3843ed5222d3b2fc96c00fbc4b6e8550 | refs/heads/main | 2023-03-25T03:13:45.313989 | 2021-03-12T12:38:24 | 2021-03-12T12:38:24 | 330,971,263 | 0 | 0 | null | 2021-01-19T12:23:49 | 2021-01-19T12:23:48 | null | UTF-8 | Python | false | false | 398 | py | # 两数之和
# 哈希表
class Solution:
    def twoSum(self, nums, target):
        """Return indices [i, j] (i < j) of the two entries of nums that sum
        to target, using a single pass with a value -> index hash map."""
        seen = {}
        for idx, value in enumerate(nums):
            needed = target - value
            if needed in seen:
                return [seen[needed], idx]
            seen[value] = idx
        # note: whether a matching pair is guaranteed depends on the problem
        # statement; uncomment to return a sentinel instead of None
        # return [-1, -1]
# quick smoke test: expected output [0, 1]
s = Solution()
print(s.twoSum([2, 7, 11, 15], 9))
| [
"zhangjohn202@gmail.com"
] | zhangjohn202@gmail.com |
090d7e24217c1a131ee19c720ec5adff441bb282 | 2b8f1b067a6602a6520e9846a2df8b83a359623a | /BOJ/BaaarkingDog/0x1D_다익스트라 알고리즘/17835.py | 048443f9ddf4f5ef97defa6cf7789b1c7040321c | [] | no_license | ymink716/PS | 3f9df821a1d4db110cd9d56b09b4c1d756951dd8 | e997ecf5a3bec1d840486b8d90b934ae1cbafe94 | refs/heads/master | 2023-08-18T18:21:45.416083 | 2023-08-16T07:26:18 | 2023-08-16T07:26:18 | 218,685,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,378 | py | # 면접보는 승범이네
# https://www.acmicpc.net/problem/17835
import heapq
import sys
input = sys.stdin.readline
n, m, k = map(int, input().split())
graph = [[] for _ in range(n + 1)]
# 도시 연결 정보를 역방향으로 지정
for _ in range(m):
u, v, c = map(int, input().split())
graph[v].append((u, c))
# 면접장 리스트
targets = list(map(int, input().split()))
def dijkstra():
q = []
# 힙큐에 면접장들을 모두 넣어줌
for t in targets:
heapq.heappush(q, (0, t))
distance[t] = 0
while q:
dist, now = heapq.heappop(q) # 가장 최단 거리가 짧은 도시 꺼내
# 해당 도시의 최단 거리 정보가 이미 갱신되어 현재 비용보다 적다면 넘어감
if distance[now] < dist:
continue
# 현재 도시와 연결된 도시들을 확인
for i in graph[now]:
cost = dist + i[1]
# 현재 도시를 거쳐 다른 도시로 이동하는 거리가 더 짧은 경우
if cost < distance[i[0]]:
distance[i[0]] = cost # 최단 거리 갱신
heapq.heappush(q, (cost, i[0])) # 힙큐에 넣어줌
distance = [int(1e11)] * (n + 1)
city, dist = 0, 0
dijkstra()
for i in range(1, n + 1):
if dist < distance[i]:
city, dist = i, distance[i]
print(city)
print(dist)
| [
"ymink716@gmail.com"
] | ymink716@gmail.com |
f4a46ee857aeca963d206c48f92c2d37fd973b73 | 2c33ab38df0a0ffd617513640fb91fcc360d3bc3 | /Graph-Algorithm/HoughLine/hough_line.py | 13c1f5b2e48cf818ee420912512ed21d2f043fa8 | [
"Apache-2.0"
] | permissive | FDU-VTS/CVCode | 71a0dc82cd973aca55e0d97ea015d8dc66d2cc94 | e9576dfbdd1ae2ff986dadde3183eb6bc0380f76 | refs/heads/master | 2021-07-22T10:42:34.384289 | 2020-04-24T09:48:43 | 2020-04-24T09:48:43 | 149,054,139 | 33 | 10 | null | null | null | null | UTF-8 | Python | false | false | 728 | py | import matplotlib.pyplot as plt
import skimage.io
import skimage.color
import skimage.morphology
import skimage.transform
import skimage.feature
import copy
original_img = skimage.io.imread("cars.png")
img = copy.deepcopy(original_img)
h, w, _ = img.shape
for i in range(h):
for j in range(w):
r, g, b, a = img[i, j]
if r < 180 or g < 180 or b < 180:
img[i, j] = [0, 0, 0, 255]
img = skimage.color.rgb2gray(skimage.color.rgba2rgb(img))
img = skimage.morphology.dilation(img)
img = skimage.feature.canny(img)
lines = skimage.transform.probabilistic_hough_line(img)
for line in lines:
p0, p1 = line
plt.plot((p0[0], p1[0]), (p0[1], p1[1]), color='red')
plt.imshow(original_img)
plt.show()
| [
"529768926@qq.com"
] | 529768926@qq.com |
4e12442854c79ceaa4258e2f2b486a52b1219285 | 2acd5555380f6dffd7a25c1ffcceacdb83ad1b41 | /chapter_2/loadbalancer/scrap/main.py | 2a47814565f2fb9ddd00687b2a47ecdd353f3cf7 | [
"Apache-2.0"
] | permissive | minsung8/monitering_exercise | 1d2f37ac4c70591195640b1f79625686e66b71d6 | ecfaba49d7406a282d4db94b02febd83dfc03e51 | refs/heads/main | 2023-09-01T17:18:30.230581 | 2021-10-27T13:35:56 | 2021-10-27T13:35:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,253 | py | from typing import Optional
from fastapi import FastAPI, Request, Response
from fastapi.responses import JSONResponse
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from datetime import datetime
import sys
import httpx
from bs4 import BeautifulSoup
import urllib.parse
from exceptions import UnicornException
from settings import Settings
from log import init_log
from cors import init_cors
from instrumentator import init_instrumentator
from config import Config
app = FastAPI()
my_settings = Settings()
conf = Config(my_settings.CONFIG_PATH)
init_log(app, conf.section("log")["path"])
init_cors(app)
init_instrumentator(app)
@app.exception_handler(UnicornException)
async def unicorn_exception_handler(request: Request, exc: UnicornException):
import traceback
traceback.print_exc(file=sys.stderr)
return JSONResponse(
status_code=exc.status,
content={"code": exc.code, "message": exc.message},
)
async def call_api(url: str):
async with httpx.AsyncClient() as client:
r = await client.get(url)
return r.text
def parse_opengraph(body: str):
soup = BeautifulSoup(body, 'html.parser')
title = soup.find("meta", {"property":"og:title"})
url = soup.find("meta", {"property":"og:url"})
og_type = soup.find("meta", {"property":"og:type"})
image = soup.find("meta", {"property":"og:image"})
description = soup.find("meta", {"property":"og:description"})
author = soup.find("meta", {"property":"og:article:author"})
resp = {}
scrap = {}
scrap["title"] = title["content"] if title else None
scrap["url"] = url["content"] if url else None
scrap["type"] = og_type["content"] if og_type else None
scrap["image"] = image["content"] if image else None
scrap["description"] = description["content"] if description else None
scrap["author"] = author["content"] if author else None
resp["scrap"] = scrap
return resp
@app.get("/api/v1/scrap")
async def scrap(url: str):
try:
url = urllib.parse.unquote(url)
body = await call_api(url)
return parse_opengraph(body)
except Exception as e:
raise UnicornException(status=400, code=-20000, message=str(e))
| [
"charsyam@naver.com"
] | charsyam@naver.com |
e6e49fa0abbb83bea3b06e6e58562c59d83cd3e7 | 3097d8735287c8e574c56831d0d49eeb4c624ad7 | /luxon/core/session/__init__.py | 6fa20d027bdd77b3a585aca7eb9142c649fc066a | [
"BSD-3-Clause"
] | permissive | TachyonicProject/luxon | 897b91e8d24bb41b66dd09c7df8933e8f8135753 | 5cfdc41a9983821b27f832c87e6424d90c0a8098 | refs/heads/development | 2020-03-12T16:49:09.961915 | 2020-01-08T11:58:12 | 2020-01-08T11:58:12 | 130,724,301 | 5 | 7 | NOASSERTION | 2020-01-08T11:58:40 | 2018-04-23T16:15:40 | Python | UTF-8 | Python | false | false | 356 | py | from luxon.core.session.session import Session
from luxon.core.session.sessionauth import TrackToken
from luxon.core.session.sessioncookie import SessionCookie as Cookie
from luxon.core.session.sessioncookie import TrackCookie
from luxon.core.session.sessionredis import SessionRedis as Redis
from luxon.core.session.sessionfile import SessionFile as File
| [
"christiaan.rademan@gmail.com"
] | christiaan.rademan@gmail.com |
bfbae7f8b7bc700d28b3310a6c1074b340bb4859 | 22b30b51d6eabbcc9c317c3825f6bcb00f947b56 | /model/mysite1/app1/migrations/0001_initial.py | 7d7ae241763fd77cf102d24472427517100b9689 | [] | no_license | SanjayPJ/doc-exercise-django | eeb04cdcdee2618e65d46c188cb7ffe7bce11704 | 71857c82709600479299b726560d526a74050695 | refs/heads/master | 2020-03-28T02:04:28.097576 | 2018-09-16T06:50:08 | 2018-09-16T06:50:08 | 147,545,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 741 | py | # Generated by Django 2.1 on 2018-09-16 05:27
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Group',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('description', models.TextField(blank=True, default=True)),
('members', models.ManyToManyField(to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"sanjaypjayan2000@gmail.com"
] | sanjaypjayan2000@gmail.com |
8b8922533a39f13fe8ef9f9ed2fa1b7a6213aa3e | b51f277dfe339ea30dce10040eca40c20bd8a4dd | /src/weixin_v3/order_pay.py | d2095ef0ffd84e24cb351f2f2f67050bdb148f7e | [
"BSD-3-Clause"
] | permissive | jack139/fair | e08b3b48391d0cb8e72bbc47e7592c030f587f48 | fe0ff64f8edbd794c3fb951ab6af420054e9e585 | refs/heads/master | 2021-06-30T15:17:15.590764 | 2020-09-23T07:14:20 | 2020-09-23T07:14:20 | 160,322,019 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,685 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import web, json, time
from bson.objectid import ObjectId
from config import setting
import app_helper
db = setting.db_web
url = ('/wx/order_pay')
# 支付完成
class handler:
def POST(self):
web.header('Content-Type', 'application/json')
param = web.input(openid='', session_id='', order_id='', pay_type='', data='')
print param
if '' in (param.order_id, param.pay_type):
return json.dumps({'ret' : -2, 'msg' : '参数错误'})
if param.openid=='' and param.session_id=='':
return json.dumps({'ret' : -2, 'msg' : '参数错误1'})
# 同时支持openid和session_id
if param.openid!='':
uname = app_helper.check_openid(param.openid)
else:
uname = app_helper.wx_logged(param.session_id)
if uname:
db_user = db.app_user.find_one({'openid':uname['openid']},{'coupon':1, 'credit':1})
if db_user==None: # 不应该发生
return json.dumps({'ret' : -5, 'msg' : '未找到用户信息'})
# 支付操作:1,记录订单支付,2.改变订单状态,3.修改库存显示 !!!!!!
# 获得订单
db_order = db.order_app.find_one(
{'order_id' : param.order_id},
#{'status':1, 'cart':1, 'due':1, 'shop':1}
{'_id':0}
)
if db_order==None:
return json.dumps({'ret' : -3, 'msg' : '未找到订单!'})
# 支付宝和微信支付订单,已PAID说明提前收到异步通知
if db_order['status']=='PAID' and param.pay_type in ('ALIPAY','WXPAY'):
# 记录此次调用
db.order_app.update_one(
{
'order_id' : param.order_id,
},
{
'$set' : {
'pay_type' : param.pay_type,
'pay' : db_order['due'],
'paid2_time' : app_helper.time_str(),
'paid2_tick' : int(time.time()),
},
'$push' : { 'history' : (app_helper.time_str(), uname['openid'], '提交付款')},
}
)
return json.dumps({'ret' : 0, 'data' : {
'order_id' : param.order_id,
'due' : db_order['due'],
'paid' : db_order['due'],
'status' : '已支付'
}})
# 只能处理未支付订单
if db_order['status']!='DUE':
return json.dumps({'ret' : -3, 'msg' : '不是待付款订单!'})
# 微信支付未到账处理
if param.pay_type in ('ALIPAY', 'WXPAY'):
# 更新销货单信息,
r = db.order_app.find_one_and_update(
{
'order_id' : param.order_id,
'status' : 'DUE'
},
{
'$set' : {
'status' : 'PREPAID',
'pay_type' : param.pay_type,
'pay' : db_order['due'],
'paid2_time' : app_helper.time_str(),
'paid2_tick' : int(time.time()),
'pay_data' : param.data,
},
'$push' : { 'history' : (app_helper.time_str(), uname['openid'], '提交付款')},
},
{'status':1}
)
# 如果不是DUE,说明已收到异步通知
if r==None:
db.order_app.update_one(
{
'order_id' : param.order_id,
},
{
'$set' : {
'pay_type' : param.pay_type,
'pay' : db_order['due'],
'paid2_time' : app_helper.time_str(),
'paid2_tick' : int(time.time()),
},
'$push' : { 'history' : (app_helper.time_str(), uname['openid'], '提交付款')},
}
)
# 返回
return json.dumps({'ret' : 0, 'data' : {
'order_id' : param.order_id,
'due' : db_order['due'],
'paid' : db_order['due'],
'status' : '已支付',
'alert' : False,
'message' : '测试信息,还未收到异步通知',
'url' : 'http://app-test.urfresh.cn'
}})
else:
return json.dumps({'ret' : -4, 'msg' : '无效的openid'})
| [
"gt@f8geek.com"
] | gt@f8geek.com |
9bc56a8bfcd997a780c81f0a1f8e6b3cae3daf90 | 8cfee59143ecd307fe7d7a27986c3346aa8ce60c | /Analysis/Excel_Chap03/6excel_value_matches_pattern.py | 9e2b0eb303f8c3f83bb8fb7b8c5fcb805d8cd35f | [] | no_license | kiminhan/Python | daafc1fde804f172ebfb1385ab9d6205c7a45970 | dc6af486aaf7d25dbe13bcee4e115207f37d4696 | refs/heads/master | 2020-03-08T19:18:10.173346 | 2018-09-06T06:11:40 | 2018-09-06T06:11:40 | 128,288,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,485 | py | #!/usr/bin/env python3
import re
import sys
from datetime import date
from xlrd import open_workbook, xldate_as_tuple
from xlwt import Workbook
input_file = sys.argv[1]
output_file = sys.argv[2]
output_workbook = Workbook()
output_worksheet = output_workbook.add_sheet('jan_2013_output')
pattern = re.compile(r'(?P<my_pattern>^J.*)')
customer_name_column_index = 1
with open_workbook(input_file) as workbook:
worksheet = workbook.sheet_by_name('january_2013')
data = []
header = worksheet.row_values(0)
data.append(header)
for row_index in range(1,worksheet.nrows):
row_list = []
if pattern.search(worksheet.cell_value(row_index, customer_name_column_index)):
for column_index in range(worksheet.ncols):
cell_value = worksheet.cell_value(row_index, column_index)
cell_type = worksheet.cell_type(row_index, column_index)
if cell_type == 3:
date_cell = xldate_as_tuple(cell_value, workbook.datemode)
date_cell = date(*date_cell[0:3]).strftime('%m/%d/%Y')
row_list.append(date_cell)
else:
row_list.append(cell_value)
if row_list:
data.append(row_list)
for list_index, output_list in enumerate(data):
for element_index, element in enumerate(output_list):
output_worksheet.write(list_index, element_index, element)
output_workbook.save(output_file) | [
"rladlsgks4@naver.com"
] | rladlsgks4@naver.com |
6ff2697e438b56f55cf9199bf14522a507e9629e | 5bca2d5a7615d2783fae7a7569d57a9a9eb3d604 | /migrations/versions/73964d6faffe_tables.py | 56f6959a2501085e0b2d1356b2a6a39945db123f | [] | no_license | eduardolujan/product_hub | 9ff3fbf11b4703993c1efb2a6202ed3b1c446cda | 0bfe0059ab0d59243794b03f70ceffe3a1a263be | refs/heads/main | 2023-03-01T15:58:05.014636 | 2021-02-03T03:04:31 | 2021-02-03T03:04:31 | 330,279,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,325 | py | """Tables
Revision ID: 73964d6faffe
Revises: 61f765248e88
Create Date: 2021-01-24 14:52:29.409679
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '73964d6faffe'
down_revision = '61f765248e88'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('store',
sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
sa.Column('name', sa.String(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table('address',
sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
sa.Column('street', sa.String(), nullable=False),
sa.Column('external_number', sa.String(), nullable=False),
sa.Column('internal_number', sa.String(), nullable=False),
sa.Column('city', sa.String(), nullable=False),
sa.Column('state', sa.String(), nullable=False),
sa.Column('country', sa.String(), nullable=False),
sa.Column('zipcode', sa.String(), nullable=False),
sa.Column('store_id', postgresql.UUID(as_uuid=True), nullable=True),
sa.ForeignKeyConstraint(['store_id'], ['store.id'], initially='DEFERRED', deferrable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_address_store_id'), 'address', ['store_id'], unique=False)
op.create_table('product',
sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
sa.Column('name', sa.String(), nullable=False),
sa.Column('price', sa.Float(), nullable=False),
sa.Column('sku', sa.String(), nullable=False),
sa.Column('store_id', postgresql.UUID(as_uuid=True), nullable=True),
sa.ForeignKeyConstraint(['store_id'], ['store.id'], initially='DEFERRED', deferrable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_product_store_id'), 'product', ['store_id'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_product_store_id'), table_name='product')
op.drop_table('product')
op.drop_index(op.f('ix_address_store_id'), table_name='address')
op.drop_table('address')
op.drop_table('store')
# ### end Alembic commands ###
| [
"eduardo.lujan.p@gmail.com"
] | eduardo.lujan.p@gmail.com |
eba70dfb05f89a0f5c308fa5ba16f7afa78b5a5a | 3b547e9f54a6391eee26dacdfb8c182db51861fa | /eval_model.py | bc76e12a74d1a2e93cf3f2f9573fa1ffe782a59b | [
"MIT"
] | permissive | LevinJ/CNN_LSTM_CTC_Tensorflow | faedcdd2574725ddf507e68b3584c693f7b6f470 | b6ee12032757136bdf0bcc2b21ad1605f7296413 | refs/heads/master | 2021-05-06T05:23:39.857506 | 2018-03-06T05:32:37 | 2018-03-06T05:32:37 | 115,079,266 | 3 | 3 | null | 2017-12-22T05:17:41 | 2017-12-22T05:17:40 | null | UTF-8 | Python | false | false | 2,868 | py | import datetime
import logging
import os
import time
import cv2
import numpy as np
import tensorflow as tf
import cnn_lstm_otc_ocr
import utils
import helper
from preparedata import PrepareData
FLAGS = utils.FLAGS
import math
import argparse
log_dir = './log/evals'
class EvaluateModel(PrepareData):
def __init__(self):
PrepareData.__init__(self)
return
def parse_param(self):
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--split_name', help='which split of dataset to use', default="eval")
parser.add_argument('-c', '--checkpoint_path', help='which checkpoint to use', default= "./checkpoint/")
args = parser.parse_args()
self.checkpoint_path = args.checkpoint_path
self.split_name = args.split_name
return
def eval_model(self):
model = cnn_lstm_otc_ocr.LSTMOCR('eval')
model.build_graph()
val_feeder, num_samples = self.input_batch_generator(self.split_name, is_training=False, batch_size = FLAGS.batch_size)
num_batches_per_epoch = int(math.ceil(num_samples / float(FLAGS.batch_size)))
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
saver = tf.train.Saver(tf.global_variables(), max_to_keep=100)
eval_writer = tf.summary.FileWriter("{}/{}".format(log_dir, self.split_name), sess.graph)
if tf.gfile.IsDirectory(self.checkpoint_path):
checkpoint_file = tf.train.latest_checkpoint(self.checkpoint_path)
else:
checkpoint_file = self.checkpoint_path
print('Evaluating checkpoint_path={}, split={}, num_samples={}'.format(checkpoint_file, self.split_name, num_samples))
saver.restore(sess, checkpoint_file)
for i in range(num_batches_per_epoch):
inputs, labels, _ = next(val_feeder)
feed = {model.inputs: inputs,
model.labels: labels}
start = time.time()
_ = sess.run(model.names_to_updates, feed)
elapsed = time.time()
elapsed = elapsed - start
# print('{}/{}, {:.5f} seconds.'.format(i, num_batches_per_epoch, elapsed))
# print the decode result
summary_str, step = sess.run([model.merged_summay, model.global_step])
eval_writer.add_summary(summary_str, step)
return
def run(self):
self.parse_param()
self.eval_model()
return
if __name__ == "__main__":
obj= EvaluateModel()
obj.run()
| [
"jianzhirong@gmail.com"
] | jianzhirong@gmail.com |
91c4c6728c8b549597426dee3069b2ca120916d3 | 25427cf7ac5ae9f8e5d421e953750a46fb2d1ebc | /ZSY/ZSY_BOM_MAN/View/migrations/0001_initial.py | 2f7a8e07d649c6ff8ba914dfe1d8fbe63115f633 | [] | no_license | povillechan/Python | d48e2e25c9961acef45162ca882b547e5b9d0b77 | 67e88d6d7bdbe49b0c5165d9b35f37dccf638877 | refs/heads/master | 2020-03-22T08:43:44.606336 | 2019-09-01T15:25:57 | 2019-09-01T15:25:57 | 139,786,618 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,055 | py | # Generated by Django 2.0.5 on 2018-06-04 09:14
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Bom',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('bomName', models.CharField(max_length=128, verbose_name='Bom����')),
('bomVersion', models.CharField(max_length=20, verbose_name='Bom�汾')),
('bomContext', models.TextField(max_length=128, verbose_name='Bom����')),
],
),
migrations.CreateModel(
name='Paper',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('paperName', models.CharField(max_length=128, verbose_name='ͼֽ����')),
('paperVersion', models.CharField(max_length=20, verbose_name='ͼֽ�汾')),
('paperDiscrib', models.CharField(max_length=128, verbose_name='ͼֽ����')),
('paperAddr', models.TextField(max_length=256, verbose_name='ͼֽ��ַ')),
],
),
migrations.CreateModel(
name='Product2Bom',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('productName', models.CharField(max_length=128, verbose_name='��Ʒ����')),
('bomName', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='View.Bom')),
],
),
migrations.AlterUniqueTogether(
name='paper',
unique_together={('paperName', 'paperVersion')},
),
migrations.AlterUniqueTogether(
name='bom',
unique_together={('bomName', 'bomVersion')},
),
]
| [
"poville@yeah.net"
] | poville@yeah.net |
283035063cf583338fab7543c71cb3ddd00b28aa | 38d34dcc8b14b05063cef875734eb4866591d133 | /medicifyapp/migrations/0004_auto_20210109_2156.py | d391b88ff8708bfb59a0c699cbb75fad7565470d | [] | no_license | AbdurRahman111/Awsome_E-Commerce_Website | 02a4061a3842559d14dd38f7a00a61b403a04822 | 743b702bb3e87e38aaf63470b67398ee33a10358 | refs/heads/master | 2023-02-25T09:38:51.146166 | 2021-02-05T16:29:00 | 2021-02-05T16:29:00 | 336,326,420 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,044 | py | # Generated by Django 3.1.4 on 2021-01-09 15:56
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('medicifyapp', '0003_auto_20210108_1902'),
]
operations = [
migrations.CreateModel(
name='bennar',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=10000)),
('image', models.ImageField(upload_to='uploads/product_image')),
],
),
migrations.AlterField(
model_name='order',
name='order_date',
field=models.DateField(blank=True, default=datetime.datetime(2021, 1, 9, 21, 56, 8, 455385)),
),
migrations.AlterField(
model_name='posted_jobs',
name='post_date',
field=models.DateField(blank=True, default=datetime.datetime(2021, 1, 9, 21, 56, 8, 454387)),
),
]
| [
"mdabdurrahmanchowdhury1122@gmail.com"
] | mdabdurrahmanchowdhury1122@gmail.com |
0ce59ea95422f9ffdfe5d58e6884c9b3aea3c0f9 | 9eb6528606cf9dd011a3ce0c1605b111c9d50955 | /Tony_study/turtle-study/turtle-circle1.py | f1a99704add630dfe7d3e4578772855dcc292681 | [] | no_license | arch123A/luoye | 0ca9f787c7d5e9ba89d2ae602528e68d7d31a636 | ba8e902cefba2c3ccc58bc266cdf9a7eff03a458 | refs/heads/master | 2023-02-02T22:47:00.256065 | 2020-12-13T09:30:33 | 2020-12-13T09:30:33 | 104,022,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py | import turtle as t
import time
pen=t.Pen()
pen.up()
# print(help(a.circle))
pen.begin_fill()
# pen.fillcolor" ")
pen.circle(10)
pen.end_fill()
time.sleep(100) | [
"arch@msn.cn"
] | arch@msn.cn |
f0fc3acb68bf9ae1fa73fd72e246b5b8f3401ad3 | f58fe5f505361c7b531ca9fde0f0cb3aa48b5652 | /012/main.py | e90f2216ee821ca20f546a62836094c8f107e9e9 | [] | no_license | elitan/euler | 2ac1891555c133cc788d22d8c040778936849cb3 | 0caa13f74f3b9fb574158f10973cc9dc2586293f | refs/heads/master | 2020-12-25T15:29:46.421623 | 2018-01-17T17:04:32 | 2018-01-17T17:04:32 | 14,663,106 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 801 | py | #!/bin/py
"""
The sequence of triangle numbers is generated by adding the natural numbers. So the 7th triangle number would be 1 + 2 + 3 + 4 + 5 + 6 + 7 = 28. The first ten terms would be:
1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...
Let us list the factors of the first seven triangle numbers:
1: 1
3: 1,3
6: 1,2,3,6
10: 1,2,5,10
15: 1,3,5,15
21: 1,3,7,21
28: 1,2,4,7,14,28
We can see that 28 is the first triangle number to have over five divisors.
What is the value of the first triangle number to have over five hundred divisors?
"""
import math as m
def divisors(n):
dev = 0
for i in range(2, int(m.ceil(n**0.5))):
if n % i == 0:
dev += 1
dev = dev * 2 + 2
#if perfect square
#dev += 1
return dev
i = 1
n = 0
while divisors(n) < 500:
n += i
i += 1
print("Winner: %d" % (n)) | [
"johan@eliasson.me"
] | johan@eliasson.me |
8e5579fe07aaf1de1c48be9ae4db8fd0159327d7 | 5e538961d3b5889c30f81ccbc6d315e0c9c8312e | /apps/xsl/src/xsl/MiscSettingsTab.py | d95fbdabadb7e06b9f60427eb9838574910b2df2 | [] | no_license | WhiteSymmetry/lightlang | 3df11a8c3b6b73bebf9076d0cb70827b685d9af6 | 7510d5dd87fc988fe1b14718bb546daae5baebe6 | refs/heads/master | 2021-01-12T10:42:57.621749 | 2014-04-10T14:14:26 | 2014-04-10T14:14:26 | 72,652,022 | 1 | 0 | null | 2016-11-02T15:13:00 | 2016-11-02T15:12:59 | null | UTF-8 | Python | false | false | 4,635 | py | # -*- coding: utf8 -*-
#
# XSL - graphical interface for SL
# Copyright (C) 2007-2016 Devaev Maxim
#
# This file is part of XSL.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import Qt
import Utils
import Locale
import Settings
import LangsList
import IconsLoader
##### Public classes #####
class MiscSettingsTab(Qt.QWidget) :
def __init__(self, parent = None) :
Qt.QWidget.__init__(self, parent)
self.__main_layout = Qt.QGridLayout()
self.setLayout(self.__main_layout)
#####
self.__settings = Settings.Settings(self)
#####
self.__show_tray_icon_checkbox = Qt.QCheckBox(self)
self.__main_layout.addWidget(self.__show_tray_icon_checkbox, 0, 0, 1, 2)
self.__show_splash_checkbox = Qt.QCheckBox(self)
self.__main_layout.addWidget(self.__show_splash_checkbox, 1, 0, 1, 2)
self.__debug_mode_checkbox = Qt.QCheckBox(self)
self.__main_layout.addWidget(self.__debug_mode_checkbox, 2, 0, 1, 2)
self.__main_layout.setRowStretch(3, 1)
self.__force_main_lang_label = Qt.QLabel(self)
self.__main_layout.addWidget(self.__force_main_lang_label, 4, 0)
self.__force_main_lang_combobox = Qt.QComboBox(self)
self.__main_layout.addWidget(self.__force_main_lang_combobox, 4, 1)
#####
self.translateUi()
### Public ###
def requisites(self) :
return {
"icon" : IconsLoader.icon("configure"),
"title" : Qt.QT_TR_NOOP("Misc"),
}
###
def saveSettings(self) :
self.__settings.setValue("application/misc/show_tray_icon_flag", Qt.QVariant(self.__show_tray_icon_checkbox.isChecked()))
self.__settings.setValue("application/misc/show_splash_flag", Qt.QVariant(self.__show_splash_checkbox.isChecked()))
self.__settings.setValue("application/logger/debug_mode_flag", Qt.QVariant(self.__debug_mode_checkbox.isChecked()))
self.__settings.setValue("application/locale/force_main_lang",
self.__force_main_lang_combobox.itemData(self.__force_main_lang_combobox.currentIndex()).toString())
def loadSettings(self) :
self.__show_tray_icon_checkbox.setChecked(self.__settings.value("application/misc/show_tray_icon_flag", Qt.QVariant(True)).toBool())
self.__show_splash_checkbox.setChecked(self.__settings.value("application/misc/show_splash_flag", Qt.QVariant(True)).toBool())
self.__debug_mode_checkbox.setChecked(self.__settings.value("application/logger/debug_mode_flag").toBool())
###
force_main_lang = self.__settings.value("application/locale/force_main_lang").toString()
for count in xrange(self.__force_main_lang_combobox.count()) :
if ( self.__force_main_lang_combobox.itemData(count).toString() == force_main_lang and
not self.__force_main_lang_combobox.itemText(count).isEmpty() ) :
self.__force_main_lang_combobox.setCurrentIndex(count)
### Private ###
def translateUi(self) :
self.__show_tray_icon_checkbox.setText(tr("Show tray icon"))
self.__show_splash_checkbox.setText(tr("Show splash screen on startup"))
self.__force_main_lang_label.setText(tr("Force language:"))
self.__debug_mode_checkbox.setText(tr("Debug mode (write info to stderr)"))
###
last_index = self.__force_main_lang_combobox.currentIndex()
self.__force_main_lang_combobox.clear()
lang_codes_dict = LangsList.langCodes()
system_lang = Locale.Locale.systemLang()
self.__force_main_lang_combobox.addItem(IconsLoader.icon(Utils.joinPath("flags", system_lang)),
tr("By default (%1)").arg(LangsList.langName(system_lang, lang_codes_dict)), Qt.QVariant(""))
self.__force_main_lang_combobox.insertSeparator(1)
for langs_list_item in Locale.Locale.validLangs() :
self.__force_main_lang_combobox.addItem(IconsLoader.icon(Utils.joinPath("flags", langs_list_item)),
LangsList.langName(langs_list_item, lang_codes_dict), Qt.QVariant(langs_list_item))
self.__force_main_lang_combobox.setCurrentIndex(last_index)
### Handlers ###
def changeEvent(self, event) :
if event.type() == Qt.QEvent.LanguageChange :
self.translateUi()
else :
Qt.QWidget.changeEvent(self, event)
| [
"mdevaev@gmail.com"
] | mdevaev@gmail.com |
b3f1a24691f753afaad91eb52fb038da4b7fd51a | e5333b2e54f1adf2e5bc88a9a242234c5f15851a | /misoclib/com/liteeth/test/udp_tb.py | c7e04c8fd30ae94ea85af0dbf0ce195cda73726c | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | hoangt/misoc | 1aaf850c18bab5b18db1fcc788feb96afbbc464e | 6c13879fb605a1ee2bd5a3b35669e093f9a4267b | refs/heads/master | 2021-01-21T02:55:59.398987 | 2015-07-13T15:00:03 | 2015-07-13T15:25:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,834 | py | from migen.fhdl.std import *
from migen.bus import wishbone
from migen.bus.transactions import *
from migen.sim.generic import run_simulation
from misoclib.com.liteeth.common import *
from misoclib.com.liteeth.core import LiteEthUDPIPCore
from misoclib.com.liteeth.test.common import *
from misoclib.com.liteeth.test.model import phy, mac, arp, ip, udp
ip_address = 0x12345678
mac_address = 0x12345678abcd
class TB(Module):
def __init__(self, dw=8):
self.dw = dw
self.submodules.phy_model = phy.PHY(8, debug=False)
self.submodules.mac_model = mac.MAC(self.phy_model, debug=False, loopback=False)
self.submodules.arp_model = arp.ARP(self.mac_model, mac_address, ip_address, debug=False)
self.submodules.ip_model = ip.IP(self.mac_model, mac_address, ip_address, debug=False, loopback=False)
self.submodules.udp_model = udp.UDP(self.ip_model, ip_address, debug=False, loopback=True)
self.submodules.core = LiteEthUDPIPCore(self.phy_model, mac_address, ip_address, 100000)
udp_port = self.core.udp.crossbar.get_port(0x5678, dw)
self.submodules.streamer = PacketStreamer(eth_udp_user_description(dw))
self.submodules.logger = PacketLogger(eth_udp_user_description(dw))
self.comb += [
Record.connect(self.streamer.source, udp_port.sink),
udp_port.sink.ip_address.eq(0x12345678),
udp_port.sink.src_port.eq(0x1234),
udp_port.sink.dst_port.eq(0x5678),
udp_port.sink.length.eq(64//(dw//8)),
Record.connect(udp_port.source, self.logger.sink)
]
# use sys_clk for each clock_domain
self.clock_domains.cd_eth_rx = ClockDomain()
self.clock_domains.cd_eth_tx = ClockDomain()
self.comb += [
self.cd_eth_rx.clk.eq(ClockSignal()),
self.cd_eth_rx.rst.eq(ResetSignal()),
self.cd_eth_tx.clk.eq(ClockSignal()),
self.cd_eth_tx.rst.eq(ResetSignal()),
]
def gen_simulation(self, selfp):
selfp.cd_eth_rx.rst = 1
selfp.cd_eth_tx.rst = 1
yield
selfp.cd_eth_rx.rst = 0
selfp.cd_eth_tx.rst = 0
for i in range(100):
yield
while True:
packet = Packet([i for i in range(64//(self.dw//8))])
yield from self.streamer.send(packet)
yield from self.logger.receive()
# check results
s, l, e = check(packet, self.logger.packet)
print("shift " + str(s) + " / length " + str(l) + " / errors " + str(e))
if __name__ == "__main__":
run_simulation(TB(8), ncycles=2048, vcd_name="my.vcd", keep_files=True)
run_simulation(TB(16), ncycles=2048, vcd_name="my.vcd", keep_files=True)
run_simulation(TB(32), ncycles=2048, vcd_name="my.vcd", keep_files=True)
| [
"florent@enjoy-digital.fr"
] | florent@enjoy-digital.fr |
ad01f8f72fabfb58743d2fd35c9720572beb238a | b4328ab3a6ece7d8f8568948781640c12f48053c | /accounts/admin.py | 79a78aa8dbe8463c995cc587989d4a0fd52c140b | [] | no_license | ochui/rockyshelf | 4de74bb2ecbe235371cafabc594c95d2a6169192 | eaa25f5867e53629e46629ca58c2815c0295a4a5 | refs/heads/master | 2020-08-01T03:25:33.250146 | 2019-09-26T17:55:42 | 2019-09-26T17:55:42 | 210,844,943 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 922 | py | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .forms import CustomUserCreationForm, CustomUserChangeForm
from accounts.models import CustomUser
@admin.register(CustomUser)
class CustomUserAdmin(UserAdmin):
    """Django admin configuration for :class:`CustomUser`.

    Extends the stock ``UserAdmin`` with KYC (phone, gender, birth date)
    and academic (school, faculty, department) field sections.
    """

    # Forms used on the "add user" and "change user" pages.
    add_form = CustomUserCreationForm
    form = CustomUserChangeForm

    # Changelist: visible columns, sidebar filters and searchable fields.
    list_display = ['username', 'email', 'phone_number']
    list_filter = ['date_joined', 'last_login', 'is_active', 'is_staff']
    search_fields = ['username', 'email', 'phone_number']

    # Append the custom sections to the default UserAdmin layouts.
    fieldsets = UserAdmin.fieldsets + (
        ('KYC', {'fields': ('phone_number', 'gender', 'date_of_birth')}),
        ('Academic', {'fields': ('school', 'faculty', 'department')})
    )
    add_fieldsets = UserAdmin.add_fieldsets + (
        ('KYC', {'fields': ('phone_number', 'gender', 'date_of_birth')}),
        ('Academic', {'fields': ('school', 'faculty', 'department')})
    )
    model = CustomUser
| [
"ochuiprincewill411@gmail.com"
] | ochuiprincewill411@gmail.com |
85da7ef93e50c892890f2eb8a31704305672719a | 5dc77586e3e0f9de1f032fd2ca68494d8e58928f | /contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_country.py | 3a598c9eeb4f921bde123e0ff136c16049bccb8e | [
"Apache-2.0"
] | permissive | great-expectations/great_expectations | dd7c22e6277d6b08bee3ff38a015e6e8cd434df6 | b0290e2fd2aa05aec6d7d8871b91cb4478e9501d | refs/heads/develop | 2023-09-04T09:30:26.395518 | 2023-09-02T00:00:13 | 2023-09-02T00:00:13 | 103,071,520 | 8,931 | 1,535 | Apache-2.0 | 2023-09-14T19:57:16 | 2017-09-11T00:18:46 | Python | UTF-8 | Python | false | false | 5,763 | py | from typing import Optional
import geonamescache
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.execution_engine import PandasExecutionEngine
from great_expectations.expectations.expectation import ColumnMapExpectation
from great_expectations.expectations.metrics import (
ColumnMapMetricProvider,
column_condition_partial,
)
def is_valid_country(country: str):
    """Return True if *country* is a country name known to geonamescache.

    Checks, in order:
      * the value is a string — checked FIRST so that non-string values
        (e.g. NaN floats coming out of a pandas column) return False
        instead of raising ``TypeError`` from ``len()``;
      * the string is at most 252 characters long;
      * the string matches one of the known country names.
    """
    # NOTE: building a GeonamesCache on every call is wasteful but keeps the
    # function self-contained; hoist it if this ever shows up in profiles.
    geocache = geonamescache.GeonamesCache()
    # Bug fix: the original evaluated ``len(country) > 252`` before the type
    # guard, so any non-sized value crashed before being rejected. Using
    # isinstance also accepts str subclasses, unlike ``type(x) != str``.
    if not isinstance(country, str):
        return False
    if len(country) > 252:
        return False
    # Set membership instead of a linear scan over the list of names.
    country_names = {c["name"] for c in geocache.get_countries().values()}
    return country in country_names
# This class defines a Metric to support your Expectation.
# For most ColumnMapExpectations, the main business logic for calculation will live in this class.
# This class defines a Metric to support your Expectation.
# For most ColumnMapExpectations, the main business logic for calculation will live in this class.
class ColumnValuesToBeValidCountry(ColumnMapMetricProvider):
    """Map metric: True per element when the value is a valid country name."""

    # This is the id string that will be used to reference your metric.
    condition_metric_name = "column_values.valid_country"

    # This method implements the core logic for the PandasExecutionEngine
    @column_condition_partial(engine=PandasExecutionEngine)
    def _pandas(cls, column, **kwargs):
        # Element-wise validity check; yields a boolean Series.
        return column.apply(lambda x: is_valid_country(x))

    # This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
    # @column_condition_partial(engine=SqlAlchemyExecutionEngine)
    # def _sqlalchemy(cls, column, _dialect, **kwargs):
    #     raise NotImplementedError

    # This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
    # @column_condition_partial(engine=SparkDFExecutionEngine)
    # def _spark(cls, column, **kwargs):
    #     raise NotImplementedError
# This class defines the Expectation itself
# This class defines the Expectation itself
class ExpectColumnValuesToBeValidCountry(ColumnMapExpectation):
    """Expect values in this column to be valid country names.

    See https://github.com/yaph/geonamescache for more information.
    """

    # These examples will be shown in the public gallery.
    # They will also be executed as unit tests for your Expectation.
    examples = [
        {
            "data": {
                "valid_countries": [
                    "Syria",
                    "Venezuela",
                    "Sao Tome and Principe",
                    "Tanzania",
                    "Uganda",
                ],
                "invalid_countries": [
                    "",
                    "1234",
                    "anarchy",
                    "Turkey men I stan",
                    "Frenc",
                ],
            },
            "tests": [
                {
                    "title": "basic_positive_test",
                    "exact_match_out": False,
                    "include_in_gallery": True,
                    "in": {"column": "valid_countries"},
                    "out": {"success": True},
                },
                {
                    "title": "basic_negative_test",
                    "exact_match_out": False,
                    "include_in_gallery": True,
                    "in": {"column": "invalid_countries"},
                    "out": {"success": False},
                },
            ],
        }
    ]

    # This is the id string of the Metric used by this Expectation.
    # For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
    map_metric = "column_values.valid_country"

    # This is a list of parameter names that can affect whether the Expectation evaluates to True or False
    success_keys = ("mostly",)

    # This dictionary contains default values for any parameters that should have default values
    default_kwarg_values = {}

    def validate_configuration(
        self, configuration: Optional[ExpectationConfiguration]
    ) -> None:
        """
        Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
        necessary configuration arguments have been provided for the validation of the expectation.

        Args:
            configuration (OPTIONAL[ExpectationConfiguration]): \
                An optional Expectation Configuration entry that will be used to configure the expectation
        Returns:
            None. Raises InvalidExpectationConfigurationError if the config is not validated successfully
        """
        super().validate_configuration(configuration)
        configuration = configuration or self.configuration

        # # Check other things in configuration.kwargs and raise Exceptions if needed
        # try:
        #     assert (
        #         ...
        #     ), "message"
        #     assert (
        #         ...
        #     ), "message"
        # except AssertionError as e:
        #     raise InvalidExpectationConfigurationError(str(e))

    # This object contains metadata for display in the public Gallery
    library_metadata = {
        "maturity": "experimental",  # "experimental", "beta", or "production"
        "tags": [
            "hackathon",
            "typed-entities",
        ],  # Tags for this Expectation in the Gallery
        "contributors": [  # Github handles for all contributors to this Expectation.
            "@luismdiaz01",
            "@derekma73",  # Don't forget to add your github handle here!
        ],
        "requirements": ["geonamescache"],
    }
if __name__ == "__main__":
    # Print the self-diagnostic checklist used to gauge Expectation completeness.
    ExpectColumnValuesToBeValidCountry().print_diagnostic_checklist()
| [
"noreply@github.com"
] | great-expectations.noreply@github.com |
724c1d2105517d9d8760bd6be9c4e99ea6501d3c | 51a2d52711300adabde54dd57901d873b3308401 | /py/specter/test/specter_test_suite.py | 668477ba21e0d53ac6dc6e92852b713f98efa15c | [
"BSD-3-Clause"
] | permissive | desihub/specter | 571933df1923dcc8cbc6c67a118023357a6c7b4f | d9b24e363db03841a24d2727a3775fd7b459f6a2 | refs/heads/main | 2023-01-24T11:12:37.203688 | 2023-01-13T04:06:30 | 2023-01-13T04:06:30 | 6,079,897 | 6 | 8 | NOASSERTION | 2023-01-11T18:44:34 | 2012-10-04T18:35:52 | Jupyter Notebook | UTF-8 | Python | false | false | 1,001 | py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""
specter.test.specter_test_suite
===============================
Used to initialize the unit test framework via ``python setup.py test``.
"""
#
from __future__ import absolute_import, division, print_function, unicode_literals
#
import unittest
#
#- This is factored out separately from runtests() so that it can be used by
#- python setup.py test
def specter_test_suite():
    """Returns unittest.TestSuite of specter tests"""
    import os.path
    # Package directory (specter/) and the directory that contains it.
    pkg_dir = os.path.dirname(os.path.dirname(__file__))
    parent_dir = os.path.dirname(pkg_dir)
    loader = unittest.defaultTestLoader
    return loader.discover(pkg_dir, top_level_dir=parent_dir)
def runtests():
    """Run all tests in specter.test.test_*.
    """
    # Discover every TestCase under specter/test/test_*.py and run it with
    # a verbose text runner.
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(specter_test_suite())
if __name__ == "__main__":
    # Allow running the suite directly: ``python specter_test_suite.py``.
    runtests()
| [
"benjamin.weaver@nyu.edu"
] | benjamin.weaver@nyu.edu |
1bb7ceb49897f7f73b38182c93e5cf9ae4dfbe56 | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/abc095/B/4914893.py | 9f289d2cd87d2d83f00a6f0ec932caf15edcfcbf | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | N,X = map(int,input().split())
lists = []
for i in range(N):
m = int(input())
lists.append(m)
X -= sum(lists)
if X > 0:
amari = X // min(lists)
print(len(lists) + amari)
else:
print(len(lists)) | [
"kwnafi@yahoo.com"
] | kwnafi@yahoo.com |
cd4368d4e14f7ad7bd1ce40c5f6178a7aeae984f | 1c10937afbba2fd4a6c1d306603bc3b7f4900be6 | /aid1901/day3/demo7_surface.py | 9c281b372893452df740c322242eb08f741f85b2 | [] | no_license | zh-en520/- | 5366628ce9d819ed1d29e41e35c0996090df1085 | 1e4697b091a3b321adc3fa2c13192de8fc3686f7 | refs/heads/master | 2020-06-28T21:25:59.080990 | 2019-08-03T07:21:42 | 2019-08-03T07:21:42 | 200,345,642 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 476 | py | import numpy as np
import matplotlib.pyplot as mp
from mpl_toolkits.mplot3d import axes3d
n = 1000
x,y = np.meshgrid(np.linspace(-3,3,n),np.linspace(-3,3,n))
z = (1-x/2+x**5+y**3) * np.exp(-x**2-y**2)
mp.figure('3D Surface', facecolor='lightgray')
mp.tick_params(labelsize=10)
ax3d = mp.gca(projection='3d')
ax3d.set_xlabel('x',fontsize=12)
ax3d.set_ylabel('y',fontsize=12)
ax3d.set_zlabel('z',fontsize=12)
ax3d.plot_surface(x,y,z,rstride=30,cstride=30,cmap='jet')
mp.show() | [
"zh_en520@163.com"
] | zh_en520@163.com |
748d1d3408f65254fb393d5846e48d2f13a89830 | 7e86a9bd9ec1f82838d114bf71ad0f6d0f12152c | /venv/Lib/site-packages/stellar_sdk/xdr/manage_sell_offer_op.py | eeb133891af91ad48163ea97b2db429074ad9c17 | [
"MIT"
] | permissive | yunoUNo/fini | b39688e7203d61f031f2ae9686845b0beccd9b2a | a833bc64a3aaf94f7268ec6eac690aa68327dd96 | refs/heads/master | 2023-08-05T17:42:48.726825 | 2021-09-29T13:30:32 | 2021-09-29T13:30:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,002 | py | # This is an automatically generated file.
# DO NOT EDIT or your changes may be overwritten
import base64
from xdrlib import Packer, Unpacker
from .asset import Asset
from .int64 import Int64
from .price import Price
__all__ = ["ManageSellOfferOp"]
class ManageSellOfferOp:
    """
    XDR Source Code
    ----------------------------------------------------------------
    struct ManageSellOfferOp
    {
        Asset selling;
        Asset buying;
        int64 amount; // amount being sold. if set to 0, delete the offer
        Price price;  // price of thing being sold in terms of what you are buying

        // 0=create a new offer, otherwise edit an existing offer
        int64 offerID;
    };
    ----------------------------------------------------------------
    """

    def __init__(
        self,
        selling: Asset,
        buying: Asset,
        amount: Int64,
        price: Price,
        offer_id: Int64,
    ) -> None:
        self.selling = selling
        self.buying = buying
        self.amount = amount
        self.price = price
        self.offer_id = offer_id

    def pack(self, packer: Packer) -> None:
        # Serialize the fields in XDR declaration order.
        self.selling.pack(packer)
        self.buying.pack(packer)
        self.amount.pack(packer)
        self.price.pack(packer)
        self.offer_id.pack(packer)

    @classmethod
    def unpack(cls, unpacker: Unpacker) -> "ManageSellOfferOp":
        # Deserialize in the same order that pack() wrote the fields.
        selling = Asset.unpack(unpacker)
        buying = Asset.unpack(unpacker)
        amount = Int64.unpack(unpacker)
        price = Price.unpack(unpacker)
        offer_id = Int64.unpack(unpacker)
        return cls(
            selling=selling,
            buying=buying,
            amount=amount,
            price=price,
            offer_id=offer_id,
        )

    def to_xdr_bytes(self) -> bytes:
        # Raw XDR byte encoding of this operation.
        packer = Packer()
        self.pack(packer)
        return packer.get_buffer()

    @classmethod
    def from_xdr_bytes(cls, xdr: bytes) -> "ManageSellOfferOp":
        unpacker = Unpacker(xdr)
        return cls.unpack(unpacker)

    def to_xdr(self) -> str:
        # Base64 text form of the XDR bytes.
        xdr_bytes = self.to_xdr_bytes()
        return base64.b64encode(xdr_bytes).decode()

    @classmethod
    def from_xdr(cls, xdr: str) -> "ManageSellOfferOp":
        xdr_bytes = base64.b64decode(xdr.encode())
        return cls.from_xdr_bytes(xdr_bytes)

    def __eq__(self, other: object):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return (
            self.selling == other.selling
            and self.buying == other.buying
            and self.amount == other.amount
            and self.price == other.price
            and self.offer_id == other.offer_id
        )

    def __str__(self):
        out = [
            f"selling={self.selling}",
            f"buying={self.buying}",
            f"amount={self.amount}",
            f"price={self.price}",
            f"offer_id={self.offer_id}",
        ]
        # NOTE(review): the brackets around ', '.join(out) render the joined
        # string inside a list literal — looks unintentional, but it matches
        # the upstream code generator's output, so it is left untouched.
        return f"<ManageSellOfferOp {[', '.join(out)]}>"
| [
"quit5123@gmail.com"
] | quit5123@gmail.com |
539d185255ee3adc8d504415b0c7c41c3d7bb57e | 0fcf15789a28415d274d313e0e00ce122e03f19e | /vdirsyncer/storage/base.py | 1a96ecef4a400915a355629c001e6332c83af9ad | [
"MIT"
] | permissive | eckhart/vdirsyncer | 1b08d7a21056d887a97c3a1925bcd319e723d5e4 | 9d0d174afee16af87bea17d252d95537f11a0554 | refs/heads/master | 2021-01-18T06:21:26.873466 | 2014-07-31T08:54:12 | 2014-07-31T08:54:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,681 | py | # -*- coding: utf-8 -*-
'''
vdirsyncer.storage.base
~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2014 Markus Unterwaditzer & contributors
:license: MIT, see LICENSE for more details.
'''
from .. import exceptions
from vdirsyncer.utils.vobject import Item # noqa
class Storage(object):
    '''Superclass of all storages, mainly useful to summarize the interface to
    implement.

    Terminology:

    - ITEM: Instance of the Item class, represents a calendar event, task or
        contact.
    - HREF: String; Per-storage identifier of item, might be UID. The reason
        items aren't just referenced by their UID is because the CalDAV and
        CardDAV specifications make this imperformant to implement.
    - ETAG: String; Checksum of item, or something similar that changes when
        the item does.

    Strings can be either unicode strings or bytestrings. If bytestrings, an
    ASCII encoding is assumed.

    :param collection: If None, the given URL or path is already directly
        referring to a collection. Otherwise it will be treated as a basepath
        to many collections (e.g. a vdir) and the given collection name will be
        looked for.
    '''
    fileext = '.txt'
    storage_name = None  # The name used in the config file.

    # A value of True means the storage does not support write-methods such as
    # upload, update and delete. A value of False means the storage does
    # support those methods, but it may also be used in read-only mode.
    read_only = False

    # The attribute values to show in the representation of the storage.
    _repr_attributes = ()

    def __init__(self, read_only=None):
        # Fall back to the class-level default when the caller doesn't say.
        if read_only is None:
            read_only = self.read_only
        # A storage whose class is inherently read-only can't be opened
        # writable.
        if self.read_only and not read_only:
            raise ValueError('This storage is read-only.')
        self.read_only = bool(read_only)

    @classmethod
    def discover(cls, **kwargs):
        '''Discover collections given a basepath or -URL to many collections.

        :param **kwargs: Keyword arguments to additionally pass to the storage
            instances returned. You shouldn't pass `collection` here, otherwise
            TypeError will be raised.
        :returns: Iterable of storages which represent the discovered
            collections, all of which are passed kwargs during initialization.
        '''
        raise NotImplementedError()

    def _get_href(self, item):
        # Default href scheme: item identifier plus the storage's file
        # extension.
        return item.ident + self.fileext

    def __repr__(self):
        return '<{}(**{})>'.format(
            self.__class__.__name__,
            dict((x, getattr(self, x)) for x in self._repr_attributes)
        )

    def list(self):
        '''
        :returns: list of (href, etag)
        '''
        raise NotImplementedError()

    def get(self, href):
        '''Fetch a single item.

        :param href: href to fetch
        :returns: (item, etag)
        :raises: :exc:`vdirsyncer.exceptions.PreconditionFailed` if item can't
            be found.
        '''
        raise NotImplementedError()

    def get_multi(self, hrefs):
        '''Fetch multiple items.

        Functionally similar to :py:meth:`get`, but might bring performance
        benefits on some storages when used cleverly.

        :param hrefs: list of hrefs to fetch
        :raises: :exc:`vdirsyncer.exceptions.PreconditionFailed` if one of the
            items couldn't be found.
        :returns: iterable of (href, item, etag)
        '''
        # Naive default implementation: one get() per href; subclasses may
        # batch.
        for href in hrefs:
            item, etag = self.get(href)
            yield href, item, etag

    def has(self, href):
        '''Check if an item exists by its href.

        :returns: True or False
        '''
        try:
            self.get(href)
        except exceptions.PreconditionFailed:
            return False
        else:
            return True

    def upload(self, item):
        '''Upload a new item.

        :raises: :exc:`vdirsyncer.exceptions.PreconditionFailed` if there is
            already an item with that href.

        :returns: (href, etag)
        '''
        raise NotImplementedError()

    def update(self, href, item, etag):
        '''Update an item.

        :raises: :exc:`vdirsyncer.exceptions.PreconditionFailed` if the etag on
            the server doesn't match the given etag or if the item doesn't
            exist.

        :returns: etag
        '''
        raise NotImplementedError()

    def delete(self, href, etag):
        '''Delete an item by href.

        :raises: :exc:`vdirsyncer.exceptions.PreconditionFailed` when item has
            a different etag or doesn't exist.
        '''
        raise NotImplementedError()
"markus@unterwaditzer.net"
] | markus@unterwaditzer.net |
ef4f09f43a2211561839ab35971f15db0dc93e6f | ac16a937f32602cf16114463f8e875a972f64c27 | /docs/dolfin/1.4.0/python/source/demo/undocumented/multistage-solver/python/demo_multi-stage-solver.py | 878ab4cdcf016ecb806852e645c82fae2e342902 | [] | no_license | mparno/fenics-web | 2073248da6f9918ffedbe9be8a3433bc1cbb7ffb | 7202752da876b1f9ab02c1d5a5f28ff5da526528 | refs/heads/master | 2021-05-05T04:45:46.436236 | 2016-12-06T20:25:44 | 2016-12-06T20:25:44 | 118,628,385 | 2 | 0 | null | 2018-01-23T15:21:47 | 2018-01-23T15:21:46 | null | UTF-8 | Python | false | false | 2,852 | py | # Copyright (C) 2007 Kristian B. Oelgaard
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# Modified by Anders Logg, 2008
# Modified by Johan Hake, 2008
# Modified by Garth N. Wells, 2009
# Modified by Johan Hake, 2013
#
# First added: 2007-11-14
# Last changed: 2013-04-05
#
# This demo solves the time-dependent convection-diffusion equation by
# a SUPG stabilized method. The velocity field used
# in the simulation is the output from the Stokes (Taylor-Hood) demo.
# The sub domains for the different boundary conditions are computed
# by the demo program in src/demo/subdomains.
#
# FIXME: Add shock capturing term and then revert back to the Stokes
# velocity
# FIXME: This demo showcase experimental features of a RKSolver (time integrator)
# FIXME: using a MultiStageScheme. It could be removed or changed anytime.
from dolfin import *
# NOTE: Python 2 syntax (print statement). The demo bails out immediately
# because the experimental RKSolver/MultiStageScheme feature is unavailable;
# everything below the exit() is unreachable until that is re-enabled.
print "RKSolver is temporarily unavailable"
exit(0)

# Load mesh and subdomains
mesh = Mesh("../dolfin_fine.xml.gz")
sub_domains = MeshFunction("size_t", mesh, "../dolfin_fine_subdomains.xml.gz");
h = CellSize(mesh)

# Create FunctionSpaces
Q = FunctionSpace(mesh, "CG", 1)
V = VectorFunctionSpace(mesh, "CG", 2)

# Create velocity Function from file
velocity = Function(V);
File("../dolfin_fine_velocity.xml.gz") >> velocity

# Initialise source function and previous solution function
f = Constant(0.0)
u0 = Function(Q)

# Parameters
T = 5.0
dt = 0.1
t = Constant(0.0)
c = 0.00005  # diffusion coefficient

# Test and trial functions
u, v = Function(Q), TestFunction(Q)

# Residual
r = dot(velocity, grad(u)) - c*div(grad(u)) - f

# Galerkin variational problem (rhs)
F = -(v*dot(velocity, grad(u)) + c*dot(grad(v), grad(u)))*dx

# Add SUPG stabilisation terms
vnorm = sqrt(dot(velocity, velocity))
F -= h/(2.0*vnorm)*dot(velocity, grad(v))*r*dx

# Set up boundary condition (value ramps linearly to 1.0, then holds)
g = Expression("(t<=ramp_stop) ? t : 1.0", t=t, ramp_stop=1.0)
bc = DirichletBC(Q, g, sub_domains, 1)

# Output file
out_file = File("results/temperature.pvd")

scheme = BDF1(F, u, t, [bc])
solver = RKSolver(scheme)

# Time-stepping
while float(scheme.t()) < T:

    solver.step(dt)

    # Plot solution
    plot(u)

    # Save the solution to file
    out_file << (u, float(scheme.t()))

# Hold plot
#interactive()
| [
"johannr@simula.no"
] | johannr@simula.no |
966234d2cd9a29a5a0dd663e2c463d3f3a40d314 | d0be690dfc259c004746237c77521ec4fd30fb52 | /lbl_lontest.py | 1bfcfb2dd28af772c434e77b44346d24d6ae1e8d | [
"Apache-2.0"
] | permissive | rbeyer/scriptorium | 3301741ab6c8ece86dc545b1306bfe7450caf21d | 47e2341c132dc92a33032e5147eba0c26d8ef7fb | refs/heads/master | 2023-07-19T20:16:46.649746 | 2023-07-11T16:11:32 | 2023-07-11T16:11:32 | 91,381,585 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,363 | py | #!/usr/bin/env python
"""Reads a PDS3 INDEX or CUMINDEX, and helps determine what longitude system
it might be in."""
# Copyright 2021, Ross A. Beyer (rbeyer@rossbeyer.net)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import csv
import sys
from pathlib import Path
import pvl
from lbl2sql import get_columns
def arg_parser():
    """Build and return the command-line parser for this script."""
    p = argparse.ArgumentParser(description=__doc__)
    p.add_argument(
        "-a", "--all",
        action="store_true",
        help="Completely check the file instead of failing fast."
    )
    p.add_argument(
        "-l", "--label",
        type=Path,
        help="PDS3 Label file. If not given, this program will look in the "
             "directory with the index file, and see if it can find an "
             "appropriate .LBL file."
    )
    p.add_argument(
        "index",
        type=Path,
        help="A PDS index.tab or a cumindex.tab file."
    )
    return p
def main():
    """Entry point: inspect CENTER_LONGITUDE values in a PDS index file.

    Returns 0 on success, -1 when the label has no CENTER_LONGITUDE column.
    Exits with status 1 if no label file can be found.
    """
    args = arg_parser().parse_args()

    if args.label is None:
        # Try both common capitalizations of the companion label file.
        for suffix in (".LBL", ".lbl"):
            # Bug fix: the original hard-coded ".LBL" here, so the loop
            # variable was ignored and a lowercase .lbl file was never found.
            p = args.index.with_suffix(suffix)
            if p.exists():
                args.label = p
                break
        else:
            print(
                "Could not guess an appropriate LBL file, please "
                "use -l explicitly."
            )
            sys.exit(1)

    label = pvl.load(args.label)
    columns = get_columns(label)

    if "CENTER_LONGITUDE" not in columns:
        print("CENTER_LONGITUDE not in columns. Quitting.")
        return -1

    with open(args.index, newline='') as csvfile:
        reader = csv.DictReader(csvfile, fieldnames=columns)
        if args.all:
            # Full scan: remember whether we saw evidence of each system.
            lon360 = None
            lon180 = None
            for row in reader:
                lon = float(row["CENTER_LONGITUDE"])
                if lon > 180:
                    lon360 = row["CENTER_LONGITUDE"]
                elif lon < 0:
                    lon180 = row["CENTER_LONGITUDE"]
            if lon360 and lon180 is None:
                print("Found longitudes greater than 180. Probably Lon360.")
            elif lon180 and lon360 is None:
                print("Found longitudes less than 0. Probably Lon180.")
            elif lon180 is not None and lon360 is not None:
                print(
                    "Found longitudes less than 0 and greater than 180, "
                    "which is messed up."
                )
            else:
                print("All longitudes were between 0 and 180, weird.")
        else:
            # Fail-fast mode: report the first value outside [0, 180].
            for row in reader:
                lon = float(row["CENTER_LONGITUDE"])
                if lon > 180 or lon < 0:
                    print(f'Found CENTER_LONGITUDE of {row["CENTER_LONGITUDE"]}')
    return 0
if __name__ == "__main__":
    # Propagate main()'s return code to the shell.
    sys.exit(main())
| [
"rbeyer@rossbeyer.net"
] | rbeyer@rossbeyer.net |
17a36b19dd6ab80b882c1f4b8536eecb5b143d26 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5670465267826688_1/Python/Renelvon/solve.py | b550930afd0bdd40a51ce994a3275d3db77fcd60 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 2,649 | py | #!/usr/bin/env python3
import functools
import sys
_QUATERNION_MULT_TABLE = {
("1", "1"): "1",
("1", "i"): "i",
("1", "j"): "j",
("1", "k"): "k",
("i", "1"): "i",
("i", "i"): "-1",
("i", "j"): "k",
("i", "k"): "-j",
("j", "1"): "j",
("j", "i"): "-k",
("j", "j"): "-1",
("j", "k"): "i",
("k", "1"): "k",
("k", "i"): "j",
("k", "j"): "-i",
("k", "k"): "-1"
}
def main():
    """CLI entry point: first argument is the Code Jam input file path."""
    input_file = sys.argv[1]
    solve(input_file)
def solve(input_file):
    """Read the input file and print one "Case #n: YES/NO" line per case.

    Each test case is two lines: "L X" followed by a length-L spelling that
    is conceptually repeated X times.
    """
    with open(input_file) as f_in:
        testcases = int(next(f_in))
        for i in range(testcases):
            l, x = tuple(int(num) for num in next(f_in).split())
            spelling = next(f_in).rstrip()
            output = "YES" if can_be_written(spelling, l, x) else "NO"
            print("Case #%d: %s" % (i + 1, output))
def can_be_written(spelling, l, x):
    """Dispatch to the direct or the folded check depending on x."""
    # Fewer than three letters total can never split into three parts.
    if l * x < 3:
        return False
    # Large repetition counts are handled without materializing the word.
    checker = large_can_be_written if x > 12 else small_can_be_written
    return checker(spelling, l, x)
def small_can_be_written(spelling, l, x):
    """Direct check for small x: build the full word and test the split.

    The word qualifies when it splits into a non-empty prefix whose unit
    product is i, a middle whose product is j, and a suffix whose product
    is k.
    """
    entire_spelling = spelling * x
    iidx = find_minimal_i_prefix(entire_spelling)  # position one past the end
    kidx = find_minimal_k_suffix(entire_spelling)  # position at start
    # Prefix must be non-empty and end strictly before the suffix begins.
    if 0 < iidx < kidx < l * x:
        return "j" == functools.reduce(
            mult_quarts, entire_spelling[iidx:kidx], "1"
        )
    return False
def large_can_be_written(spelling, l, x):
    """Check for large x without materializing all x copies of the word.

    Searches the minimal i-prefix and minimal k-suffix inside just 4 copies
    of the spelling, then reduces a residual word made of the leftover of
    those copies plus (x - 8) mod 4 middle copies.

    NOTE(review): correctness presumably relies on powers of a word's unit
    product cycling with period dividing 4 — confirm against the contest
    analysis before reusing this elsewhere.
    """
    quad_spelling = spelling * 4
    iidx = find_minimal_i_prefix(quad_spelling)  # position one past the end
    kidx = find_minimal_k_suffix(quad_spelling)  # position at start
    if 0 < iidx < l * 4 and 0 < kidx < l * 4:
        _, q = divmod(x - 8, 4)
        # prefix-leftover + q full middle copies + suffix-leftover
        residual = "".join((
            quad_spelling[iidx:],
            spelling * q,
            quad_spelling[:kidx]
        ))
        return "j" == functools.reduce(mult_quarts, residual, "1")
    return False
def find_minimal_i_prefix(qtext):
    """Length of the shortest prefix whose unit product is i.

    Returns len(qtext) when no prefix (including the whole string) works.
    """
    acc = "1"
    for idx, ch in enumerate(qtext):
        if acc == "i":
            # Product of the first idx characters already equals i.
            return idx
        acc = mult_quarts(acc, ch)
    return len(qtext)
def find_minimal_k_suffix(qtext):
    """Start index of the shortest suffix whose unit product is k.

    Returns 0 when no suffix (including the whole string) works.
    """
    acc = "1"
    idx = len(qtext)
    for ch in reversed(qtext):
        if acc == "k":
            break
        idx -= 1
        # Multiply on the left: ch precedes the suffix accumulated so far.
        acc = mult_quarts(ch, acc)
    return idx
def mult_quarts(q1, q2):
    """Multiply two signed quaternion units given as strings ("i", "-k", ...)."""
    # Use only "-" sign, omit "+"
    # The last character of each operand is its unit letter; an optional
    # leading "-" is its sign (so len == 2 means negative).
    abs_val = _QUATERNION_MULT_TABLE[(q1[-1], q2[-1])]
    if (len(q1) + len(q2)) % 2 == 0:
        # Both positive or both negative: the operand signs cancel.
        return abs_val
    elif len(abs_val) == 2:
        # Exactly one negative operand and a negative table entry: the two
        # negations cancel, so drop the "-".
        return abs_val[-1]
    else:
        return "-" + abs_val
if __name__ == '__main__':
    # Usage: python solve.py <input-file>
    main()
| [
"eewestman@gmail.com"
] | eewestman@gmail.com |
601b0f0c6249de30d39f9c6bcca5bccafa39d142 | 6b20a0d3fc814cff0adafcffcaa193eb91677897 | /data/phys/fill_6297/config_common.py | 72dfc824d5f1784597b9bf16698c4ea5e8b83fbc | [] | no_license | jan-kaspar/analysis_ctpps_alignment_2017_postTS2 | 3a942f62ec8ea8f2e124e09831dfe87f54980f26 | 66b1e0cd77643c143797cee53fc36b8652a6becf | refs/heads/master | 2021-05-04T12:55:09.753837 | 2018-08-21T11:31:19 | 2018-08-21T11:31:19 | 120,303,385 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 282 | py | import sys
import os
import FWCore.ParameterSet.Config as cms
sys.path.append(os.path.relpath("../../../../../"))
from config_base import config
config.sector_45.cut_h_c = -38.57
config.sector_45.cut_v_c = 1.63
config.sector_56.cut_h_c = -39.34
config.sector_56.cut_v_c = 1.41
| [
"jan.kaspar@cern.ch"
] | jan.kaspar@cern.ch |
26c39c3f10cf4e8e3c1a1f83e5764c9d78805969 | 21b131564f9708d4667dc6dc0043ce6173dfa83c | /leetcode/Symmetric Tree.py | b6335a454916b0a33c3576aab48d96da4023aaa9 | [] | no_license | hermitbaby/leetcode | 5f1b6854c181adabb00951a56dd5235316ab8a45 | 5b76e2ff61a42cd366447d4a7cf1f4f9c624369b | refs/heads/master | 2020-12-24T08:37:25.846329 | 2017-04-28T23:27:57 | 2017-04-28T23:27:57 | 34,924,256 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,655 | py | # Given a binary tree, check whether it is a mirror of itself (ie, symmetric around its center).
#
# For example, this binary tree is symmetric:
#
# 1
# / \
# 2 2
# / \ / \
# 3 4 4 3
# But the following is not:
# 1
# / \
# 2 2
# \ \
# 3 3
# Note:
# Bonus points if you could solve it both recursively and iteratively.
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    # @param {TreeNode} root
    # @return {boolean}
    def isSymmetric0(self, root):
        """Recursive check: a tree is symmetric when its two subtrees mirror."""
        return True if root is None else self.isSym(root.left, root.right)

    def isSym(self, left, right):
        """Return True when the two subtrees are mirror images of each other."""
        if left is None and right is None:
            return True
        if left is None or right is None:
            return False
        if left.val != right.val:
            return False
        # Outer pair with outer pair, inner pair with inner pair.
        return (self.isSym(left.left, right.right)
                and self.isSym(left.right, right.left))

    def isSymmetric(self, root):
        """Iterative check using an explicit stack of mirrored node pairs."""
        if root is None:
            return True
        pending = [(root.left, root.right)]
        while pending:
            a, b = pending.pop()
            if a is None and b is None:
                continue
            if a is None or b is None or a.val != b.val:
                return False
            pending.append((a.left, b.right))
            pending.append((a.right, b.left))
        return True
"lchen@choicestream.com"
] | lchen@choicestream.com |
f9effb28c711e475e08298c71f5a21a49d7acde2 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/yangydeng_tc_koubei_newBird/tc_koubei_newBird-master/dyy/MLcodes/MLcode_0.py | a7977400f7727ac2ae6150328cbb99d500a9d311 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 1,026 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 09 10:21:49 2017
@author: Administrator
"""
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import GradientBoostingRegressor
import sys
sys.path.append('../tools')
from tools import get_result
# Suffix identifying which dated feature/label files to load and write.
day_time = '_02_16_3'

train_x = pd.read_csv('../train_0/train_x'+day_time+'.csv')
train_y = pd.read_csv('../train_0/train_y'+day_time+'.csv')
test_x = pd.read_csv('../test_0/test_x'+day_time+'.csv')

# Earlier RandomForest experiment, kept commented out for reference.
#RF = RandomForestRegressor(n_estimators=1200,random_state=1,n_jobs=-1,min_samples_split=2,min_samples_leaf=2,max_depth=25)
#RF.fit(train_x,train_y)
#pre = (RF.predict(test_x)).round()

# Extra-Trees regressor; predictions are rounded to whole counts.
ET = ExtraTreesRegressor(n_estimators=1200,random_state=1,n_jobs=-1,min_samples_split=2,min_samples_leaf=2,max_depth=25,max_features=270)
ET.fit(train_x,train_y)
pre = (ET.predict(test_x)).round()

# get_result() formats the predictions into the submission layout.
result = get_result(pre)
result.to_csv('../results/result'+day_time+'.csv',index=False,header=False)
| [
"659338505@qq.com"
] | 659338505@qq.com |
615663416663253f049b531beb1c929695c6c61f | 6fd26735b9dfd1d3487c1edfebf9e1e595196168 | /2016/day08a_tiny_code_displaying_screen.py | b29fc623f52b71abd419ce76166fd185ffdc2a7b | [
"BSD-3-Clause"
] | permissive | Kwpolska/adventofcode | bc3b1224b5272aa8f3a5c4bef1d8aebe04dcc677 | 8e55ef7b31a63a39cc2f08b3f28e15c2e4720303 | refs/heads/master | 2021-01-10T16:48:38.816447 | 2019-12-03T20:46:07 | 2019-12-03T20:46:07 | 47,507,587 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,149 | py | #!/usr/bin/env python3
import numpy
with open("input/08.txt") as fh:
file_data = fh.read()
def solve(data, width, height):
    """Run the tiny-screen instructions in *data*; return the lit-pixel count.

    Supported instructions (one per line):
      ``rect AxB``               -- light the top-left A-wide, B-tall rectangle
      ``rotate row y=A by B``    -- roll row A right by B pixels
      ``rotate column x=A by B`` -- roll column A down by B pixels
    """
    # Bug fix: ``numpy.bool`` was a deprecated alias for the builtin ``bool``
    # and was removed in NumPy 1.24, so the original crashed on modern NumPy.
    array = numpy.zeros((height, width), dtype=bool)
    for line in data.split('\n'):
        if not line:
            continue
        if line.startswith('rect'):
            # OPERATION = rect
            a, b = (int(i) for i in line[4:].split('x'))
            # Vectorized fill instead of a per-pixel double loop.
            array[:b, :a] = True
        else:
            # OPERATION = rotate
            _, kind, axis, _, amount = line.split()
            index = int(axis[2:])  # strip the "x=" / "y=" prefix
            amount = int(amount)
            if kind == 'column':
                array[:, index] = numpy.roll(array[:, index], amount)
            else:
                array[index] = numpy.roll(array[index], amount)
    return numpy.count_nonzero(array)
# Worked example from the puzzle statement, used as an import-time smoke test.
test_data = "rect 3x2\nrotate column x=1 by 1\nrotate row y=0 by 4\nrotate column x=1 by 1"
test_output = solve(test_data, 7, 3)
test_expected = 6  # ".#..#.#\n#.#....\n.#....."
print(test_output, test_expected)
assert test_output == test_expected

# Part-one answer for the real 50x6 screen.
print(solve(file_data, 50, 6))
| [
"kwpolska@gmail.com"
] | kwpolska@gmail.com |
fad8744022c9adaad6c09d096ffac46929675f6e | 0a65d42f4f0e491cb2aada408401b94909f821c2 | /mhiap/mhiap_landing/urls.py | f01523ec5db9b36db84dafc291186a45715832a5 | [] | no_license | jmadlansacay/_Office | 3acde7655784e91c7dcecfc853d4f36cdfeef028 | 7f46449b9f7e8e892e2e0025ba493259197fa592 | refs/heads/main | 2023-07-28T10:23:54.680822 | 2021-09-11T02:28:07 | 2021-09-11T02:28:07 | 379,155,026 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | from django.urls import path
from . import views
urlpatterns = [
path('', views.landing_index, name ='landing_index'),
] | [
"Q034800@mhi.co.jp"
] | Q034800@mhi.co.jp |
c0a91143554164dc10207cb1baec7850f8e5b7c4 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit_Class3164.py | 75bd4fadf578af609572e00a72c5b264519b5bbc | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,211 | py | # qubit number=4
# total number=39
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
    """XOR two equal-length bit strings position by position.

    The joined result is reversed relative to the inputs, preserving the
    original bit-ordering convention of this module.
    """
    length = len(s)
    bits = [str(int(s[i]) ^ int(t[i])) for i in range(length)]
    return ''.join(bits[::-1])
def bitwise_dot(s: str, t: str) -> str:
    """Dot product (mod 2) of two bit strings, returned as "0" or "1"."""
    total = sum(int(s[i]) * int(t[i]) for i in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the bit-flip oracle circuit O_f for the boolean function ``f``.

    For every n-bit input ``rep`` with ``f(rep) == "1"``, a multi-controlled
    Toffoli flips the single target qubit.  X gates temporarily map the
    0-bits of ``rep`` onto the all-ones control pattern and are undone
    afterwards, so the controls are restored for the next input.

    :param n: number of control (input) qubits.
    :param f: callable mapping an n-character bit string to "0"/"1".
    :return: a QuantumCircuit named "Of" over n control qubits plus 1 target.
    """
    # implement the oracle O_f
    # NOTE: use multi_control_toffoli_gate ('noancilla' mode)
    # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
    # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
    # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)  # i rendered as an n-character bit string
        if f(rep) == "1":
            # Flip the 0-bit controls so the all-ones pattern matches rep.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            # Uncompute the temporary X gates.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Assemble the n-qubit benchmark circuit around the oracle for ``f``.

    The gate sequence (and its ``# number=`` bookkeeping comments) is
    auto-generated; the oracle from :func:`build_oracle` acts on the first
    n-1 qubits with the last qubit as its target.  The classical register is
    allocated but no measurements are added (the caller uses the statevector).
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.cx(input_qubit[0],input_qubit[3]) # number=12
    prog.x(input_qubit[3]) # number=13
    prog.h(input_qubit[3]) # number=28
    prog.cz(input_qubit[0],input_qubit[3]) # number=29
    prog.h(input_qubit[3]) # number=30
    prog.z(input_qubit[3]) # number=10
    prog.h(input_qubit[1]) # number=2
    prog.h(input_qubit[2]) # number=3
    prog.rx(2.708052867394402,input_qubit[1]) # number=11
    prog.h(input_qubit[3]) # number=4
    prog.h(input_qubit[0]) # number=5

    # Apply the oracle across all n qubits (n-1 controls + 1 target).
    oracle = build_oracle(n-1, f)
    prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
    prog.h(input_qubit[1]) # number=6
    prog.y(input_qubit[2]) # number=16
    prog.h(input_qubit[0]) # number=36
    prog.cz(input_qubit[1],input_qubit[0]) # number=37
    prog.h(input_qubit[0]) # number=38
    prog.h(input_qubit[3]) # number=25
    prog.z(input_qubit[1]) # number=20
    prog.z(input_qubit[3]) # number=31
    prog.h(input_qubit[0]) # number=22
    prog.cz(input_qubit[1],input_qubit[0]) # number=23
    prog.h(input_qubit[0]) # number=24
    prog.z(input_qubit[2]) # number=15
    prog.h(input_qubit[2]) # number=7
    prog.h(input_qubit[3]) # number=8
    prog.y(input_qubit[2]) # number=18
    prog.h(input_qubit[0]) # number=9
    prog.h(input_qubit[0]) # number=32
    prog.cz(input_qubit[1],input_qubit[0]) # number=33
    prog.h(input_qubit[0]) # number=34
    prog.x(input_qubit[2]) # number=35
    prog.cx(input_qubit[1],input_qubit[0]) # number=27
    # circuit end
    return prog
if __name__ == '__main__':
    # Hidden bit strings defining f(x) = (a . x) XOR b.
    a = "111"
    b = "0"
    f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
    prog = make_circuit(4,f)
    # Exact statevector simulation (no sampling noise).
    backend = BasicAer.get_backend('statevector_simulator')
    sample_shot =8000  # NOTE(review): unused with the statevector backend — presumably left over from a shot-based variant
    info = execute(prog, backend=backend).result().get_statevector()
    # Convert amplitudes to a {bitstring: probability} map, rounded to 3 places.
    qubits = round(log2(len(info)))
    info = {
        np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
        for i in range(2 ** qubits)
    }
    # Transpile against a mocked 5-qubit device and record the results to CSV.
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)

    writefile = open("../data/startQiskit_Class3164.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.__len__(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
bcfd03c9f8baeb8050157a15878fa97165389a4e | 440f868dd3d3bfe9bbeb8350eeac19741550537c | /migrations/versions/0a48b22fada9_modify_comments_table_migration.py | 690ff91979ef90c03e948d56b715d042030420b8 | [
"MIT"
] | permissive | Isaacg94/personal-blog | 47cc9036a85d0928d816a523188636be1a1c6ed5 | be4bc49655c5dd17664e7532ae9ceef31161157a | refs/heads/master | 2022-12-14T00:26:46.175694 | 2021-04-30T23:43:43 | 2021-04-30T23:43:43 | 217,496,372 | 0 | 0 | null | 2022-12-08T06:47:24 | 2019-10-25T09:11:21 | Python | UTF-8 | Python | false | false | 1,098 | py | """modify comments table Migration
Revision ID: 0a48b22fada9
Revises: 8eb8a629f46e
Create Date: 2019-10-28 20:39:15.260269
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0a48b22fada9'
down_revision = '8eb8a629f46e'
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``comments`` table (run on ``alembic upgrade``).

    Each comment belongs to one post (``post_id`` -> posts.id) and one
    author (``user_id`` -> users.id); ``comment_by`` stores the author name
    denormalized alongside the foreign key.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('comments',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('comment', sa.String(), nullable=True),
    sa.Column('post_id', sa.Integer(), nullable=True),
    sa.Column('posted', sa.DateTime(), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.Column('comment_by', sa.String(), nullable=True),
    sa.ForeignKeyConstraint(['post_id'], ['posts.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the ``comments`` table, reversing :func:`upgrade`."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('comments')
    # ### end Alembic commands ###
| [
"7248zack@gmail.com"
] | 7248zack@gmail.com |
e6fecc5dfd263374da30baa5ca08dcfc36cf843e | e9b2a3af17a82ec4bebf3b0ca24e670885de951e | /neo/VM/RandomAccessStack.py | 7cd230c31d8ae5c134ab231ddc132e70785f6ac5 | [
"LicenseRef-scancode-free-unknown",
"MIT"
] | permissive | imusify/crowdfunding-blockchain-middleware | 379efb5654f36ee3028fdd50b7e4ae649973ca6e | 35d967b005741208a7947b2edface5158d177413 | refs/heads/imu2 | 2022-06-02T06:39:05.971622 | 2018-03-03T09:59:38 | 2018-03-03T09:59:38 | 120,617,579 | 3 | 2 | MIT | 2021-06-01T21:55:40 | 2018-02-07T13:18:57 | Python | UTF-8 | Python | false | false | 1,682 | py | from neo.VM.InteropService import StackItem
class RandomAccessStack():
    """A stack addressed from the top: index 0 refers to the most recent item.

    Backed by a plain list (bottom first); all top-relative indices are
    translated to backing-list positions via ``Count - 1 - index``.
    """

    _list = []
    _name = 'Stack'

    def __init__(self, name='Stack'):
        self._list = []
        self._name = name

    @property
    def Count(self):
        """Number of items currently on the stack."""
        return len(self._list)

    @property
    def Items(self):
        """The backing list itself (bottom of the stack first)."""
        return self._list

    def Clear(self):
        """Discard every item."""
        self._list = []

    def GetEnumerator(self):
        """Enumerate (position, item) pairs over the backing list."""
        return enumerate(self._list)

    def Insert(self, index, item):
        """Insert ``item`` at backing-list position ``index`` (0..Count)."""
        index = int(index)
        if not 0 <= index <= self.Count:
            raise Exception("Invalid list operation")
        self._list.insert(index, item)

    def Peek(self, index=0):
        """Return the item ``index`` places below the top without removing it."""
        index = int(index)
        if index >= self.Count:
            raise Exception("Invalid list operation")
        return self._list[self.Count - 1 - index]

    def Pop(self):
        """Remove and return the top item."""
        # self.PrintList("POPSTACK <- ")
        return self.Remove(0)

    def PushT(self, item):
        """Push ``item``, wrapping plain values into a StackItem first."""
        if not issubclass(type(item), StackItem):
            item = StackItem.New(item)
        self._list.append(item)

    def Remove(self, index):
        """Remove and return the item ``index`` places below the top."""
        index = int(index)
        if not 0 <= index < self.Count:
            raise Exception("Invalid list operation")
        return self._list.pop(self.Count - 1 - index)

    def Set(self, index, item):
        """Replace the item ``index`` places below the top with ``item``."""
        index = int(index)
        if not 0 <= index <= self.Count:
            raise Exception("Invalid list operation")
        if not issubclass(type(item), StackItem):
            item = StackItem.New(item)
        self._list[self.Count - index - 1] = item
| [
"tasaunders@gmail.com"
] | tasaunders@gmail.com |
28a9f6ea1e2c3d5c96b687baee604ce54d312130 | d7ccb4225f623139995a7039f0981e89bf6365a4 | /.history/accounts/models_20211010000215.py | 35292b7aa656d3cd1f02443e0c8db84e9d132c67 | [] | no_license | tonnymuchui/django-mall | 64fd4abc3725c1bd0a3dcf20b93b490fe9307b37 | 55c083d8433be3c77adc61939cd197902de4ce76 | refs/heads/master | 2023-08-23T04:59:20.418732 | 2021-10-13T15:59:37 | 2021-10-13T15:59:37 | 415,668,388 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 936 | py | from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
# Create your models here.
class Account(AbstractBaseUser):
first_name = models.CharField(max_length=50)
last_name = models.CharField(max_length=50)
username = models.CharField(max_length=50, unique=True)
email = models.EmailField(max_length=100, unique=True)
phone_number = models.CharField(max_length=50)
# required
date_joined = models.DateTimeField(auto_now_add=True)
last_login = models.DateTimeField(auto_now_add=True)
is_admin = models.BooleanField(default=False)
is_staff = models.BooleanField(default=False)
is_active = models.BooleanField(default=False)
is_superadmin = models.BooleanField(default=False)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['username', 'first_name', 'last_name']
de | [
"tonykanyingah@gmail.com"
] | tonykanyingah@gmail.com |
d4fe72b1336081d99ef0dabd6ff3b10c865f77a2 | af327b6738acf8ee9383dc4fd9bc82bf522a9afb | /第6章 结构化数据分析工具Pandas/code_6.4.5.py | 83223ac8d27510a3b5a381e3be239d285164f8b3 | [] | no_license | xufive/ways2grow | c76b03d56645ae1731bcb450ab30e0cd171f0570 | 3abf8b942bfe997d73b73fe1fb61ff81ad74d093 | refs/heads/master | 2022-12-28T12:08:01.984345 | 2020-10-16T10:01:53 | 2020-10-16T10:01:53 | 265,993,044 | 16 | 9 | null | null | null | null | UTF-8 | Python | false | false | 768 | py | # -*- encoding: utf-8 -*-
"""
6.4.5 日期时间索引对象
"""
import pandas as pd
# DatetimeIndex built from a plain list of date strings, and from a pandas Index.
print(pd.DatetimeIndex(['2020-03-10', '2020-03-11', '2020-03-12']))
print(pd.DatetimeIndex(pd.Index(['2020-03-10', '2020-03-11', '2020-03-12'])))
# Keep a string Index and a string Series around for the conversions below.
idx = pd.Index(['2020-03-10', '2020-03-11', '2020-03-12'])
sdt = pd.Series(['2020-03-10', '2020-03-11', '2020-03-12'])
print(idx)
print(sdt)
# DatetimeIndex accepts either container directly.
print(pd.DatetimeIndex(idx))
print(pd.DatetimeIndex(sdt))
# pd.to_datetime: list/Index -> DatetimeIndex, Series -> datetime64 Series.
print(pd.to_datetime(['2020-03-10', '2020-03-11', '2020-03-12', '2020-03-13']))
print(pd.to_datetime(idx))
print(pd.to_datetime(sdt))
# date_range: daily by default; '3H' = every 3 hours; '15T' = every 15 minutes.
print(pd.date_range(start='2020-05-12', end='2020-05-18'))
print(pd.date_range(start='2020-05-12 08:00:00', periods=6, freq='3H'))
print(pd.date_range(start='08:00:00', end='9:00:00', freq='15T'))
| [
"xufive@sdysit.com"
] | xufive@sdysit.com |
e15a973ef84fabaca83fcc2b58c755850c5f1177 | 877bd731bc97f220c363914d1e66970e2d9e599e | /python_stack/_django/django_intro/books_authors_proj/books_authors_app/urls.py | 2cbca658d4748c9158dd939713ddc91444e95267 | [] | no_license | mpresto/dojo | eaccc08465298d35ae5a8e0d60e547a90bc24e05 | aec14ee041950eea7c35003fa03b0728b4606754 | refs/heads/master | 2021-05-26T00:15:16.551562 | 2020-10-04T00:09:48 | 2020-10-04T00:09:48 | 253,975,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | from django.urls import path
from . import views
urlpatterns = [
path('books', views.book),
path('create_book', views.process_book),
path('books/<int:id>', views.detail_book),
path('add_author', views.add_author),
path('authors', views.author),
path('create_author', views.process_author),
path('authors/<int:id>', views.detail_author),
path('add_book', views.add_book),
] | [
"monty.preston5@gmail.com"
] | monty.preston5@gmail.com |
29089798558c29e3df1bc3539daeac08a378841d | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_cockier.py | 5759063c1290757ca3ab8527642ce07e621e48af | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py |
#calss header
class _COCKIER():
def __init__(self,):
self.name = "COCKIER"
self.definitions = cocky
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['cocky']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
da5fb1954750247d10046085ec61041faa23735d | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_103/ch39_2020_04_01_14_56_02_997196.py | a35791d49c98122f6542b004e3643a0780ffd8b9 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 645 | py | def collatz(primeiro_termo):
novo_termo=0
if primeiro_termo<1000:
if primeiro_termo%2==0:
novo_termo=primeiro_termo/2
else:
novo_termo=(primeiro_termo*3)+1
n=2
lista=[0]*n
lista[0]=primeiro_termo
lista[1]=novo_termo
while novo_termo>1:
if novo_termo%2==0:
novo_termo=novo_termo/2
else:
novo_termo=(novo_termo*3)+1
n+=1
lista.append(novo_termo)
return len(lista)
tamanho=0
primeiro=0
i=1
while i<1000:
m=collatz(i)
if m> tamanho:
tamanho=m
primeiro=i
i+=1
return primeiro
| [
"you@example.com"
] | you@example.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.