blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6c6dc8195de79b31a793ca2f5dc418d94ff91f64 | b78ef082335b0a901b3f028746055fc6308990a2 | /Algorithms/Leetcode/1079 - Letter Tile Possibilities.py | 8d6c8b6e16e83f525e77681c3c51395c04c71c9d | [] | no_license | timpark0807/self-taught-swe | 1a968eab63f73cea30ef2379ffab53980743ed1a | bbfee57ae89d23cd4f4132fbb62d8931ea654a0e | refs/heads/master | 2021-08-14T23:31:14.409480 | 2021-08-10T06:36:06 | 2021-08-10T06:36:06 | 192,797,733 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 681 | py | class Solution:
def numTilePossibilities(self, tiles):
letters = list(tiles)
seen = set()
answers = []
self.backtrack(letters, seen, answers, [])
return len(answers)
def backtrack(self, letters, seen, answers, curr):
if curr != [] and ''.join(list(curr)) not in answers:
answers.append(''.join(list(curr)))
for i in range(len(letters)):
if i not in seen:
seen.add(i)
self.backtrack(letters, seen, answers, curr + [letters[i]])
seen.remove(i)
s = Solution()
answer = s.numTilePossibilities("IMSLHTX")
print(answer)
| [
"timpark0807@gmail.com"
] | timpark0807@gmail.com |
88cd51908a2fcde82064b30f691ac7fb66f79cb6 | 1793aac7856809ed8e121955056154de50a2ae8f | /c13_o_reilly/p26_unix_match.py | 3e5073d57f190763778544e02c67d56fbb10ef3f | [] | no_license | ZpRoc/checkio | fe4af88f116f75f8197cd31d857ae5262615b6af | 126647f8971732bdf13d49092df178654dee889b | refs/heads/main | 2023-03-22T23:52:37.330312 | 2021-03-10T02:28:56 | 2021-03-10T02:28:56 | 335,871,362 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 635 | py | # ---------------------------------------------------------------- #
# Unix Match
# Filename patterns matching
# Text, parsing, string
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
| [
"zheng_roc@163.com"
] | zheng_roc@163.com |
2d2887fc3228cd41ff2cbef9067eb25f091d8b7c | e06c7fd594c52425ab7fc5498c07ae14daf9578b | /api/view/locations.py | f57ce6184150065f58cbad03b00ee6c32fcfcca6 | [] | no_license | rwheeler-7864/simplenight-api | bc35560eca1e1c25092a1bcdc4af1633367413b8 | 602646911a0155df5b70991d1445c10cee18cd33 | refs/heads/master | 2023-03-12T03:10:51.516499 | 2021-02-25T20:40:44 | 2021-02-25T20:40:44 | 342,370,358 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,419 | py | from rest_framework import viewsets
from rest_framework.decorators import action
from rest_framework.permissions import AllowAny
from rest_framework.request import Request
from api.locations import location_service
from api.view.default_view import _response
class LocationsViewSet(viewsets.ViewSet):
permission_classes = (AllowAny,)
@action(detail=False, url_path="cities", methods=["GET"], name="Search Locations by Prefix")
def find_all(self, request: Request):
lang_code = request.GET.get("lang_code")
country_code = request.GET.get("country_code")
return _response(location_service.find_all_cities(country_code=country_code, language_code=lang_code))
@action(detail=False, url_path="prefix", methods=["GET"], name="Search Locations by Prefix")
def find_by_prefix(self, request: Request):
lang_code = request.GET.get("lang_code", "en")
prefix = request.GET.get("prefix")
locations = location_service.find_by_prefix(prefix, lang_code)
return _response(locations)
@action(detail=False, url_path="id", methods=["GET"], name="Search Locations by Prefix")
def find_by_id(self, request: Request):
lang_code = request.GET.get("lang_code", "en")
geoname_id = request.GET.get("location_id")
locations = location_service.find_city_by_simplenight_id(geoname_id, lang_code)
return _response(locations)
| [
"randmwheeler@gmail.com"
] | randmwheeler@gmail.com |
5e383048b494ba192b302e7a04ab7666c6bad659 | 18a2e479f4edef528fa7803723822f9f5974e5f8 | /30_draft_randpart_daily_gather.py | 1e5364cb1152f4594cbcf24008706c34677a3b13 | [] | no_license | wpower12/RedditCountyBias | ee25cb870b807466ed53225471e9ac6f5eec1cd0 | 59f0b6642f20547ac129b47496ef3ca0ac135a39 | refs/heads/master | 2023-04-04T22:24:24.258295 | 2021-04-15T17:50:18 | 2021-04-15T17:50:18 | 329,438,176 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,762 | py | import pymysql as sql
import progressbar
import rcdTools.DataCollectDaily as dc
SUB_PER_COHORT = 10
DAYS_PER_COHORT = 10
USERS_PER_COHORT = 10
conn = sql.connect(host='localhost',
user='bill',
password='password',
database='reddit_data')
iteration = 1
running = True
while running:
## SUBREDDIT PASS - collecting more users (hopefully)
# Do a pass over each day, for each cohort.
lsub_cohorts = dc.getRandPartSubreddits(SUB_PER_COHORT, conn)
for day in range(1, 366):
daily_total = 0
daily_pbar = progressbar.ProgressBar(max_value=len(lsub_cohorts),
prefix="Subs: Day {}, Iter {}: ".format(day, iteration),
redirect_stdout=True)
i = 0
for lsub_cohort in lsub_cohorts:
u_count = dc.subredditCohortGather(conn, lsub_cohort, 2020, day)
daily_total += u_count
i += 1
daily_pbar.update(i)
daily_pbar.finish()
print("Subs: Day {}, Iter {}: {}".format(day, iteration, daily_total))
## USER PASS
# Do a pass over each day, for a new set of random users from that day.
for day in range(1, 366):
# Forgot I don't handle the 'sets' in the gather yet.
user_cohorts = dc.getRandPartUseryds([day], USERS_PER_COHORT, conn)
daily_total = 0
daily_pbar = progressbar.ProgressBar(max_value=len(user_cohorts),
prefix="AS's: Day {}, Iter {}: ".format(day, iteration),
redirect_stdout=True)
i = 0
for user_cohort in user_cohorts:
as_count = dc.userydASCohortGather(conn, user_cohort, 2020, day)
daily_total += as_count
i += 1
daily_pbar.update(i)
daily_pbar.finish()
print("AS's: Day {}, Iter {}: {}".format(day, iteration, daily_total))
iteration += 1
# for ds in day_sets:
# user_cohorts = dc.getRandPartUseryds(ds, USERS_PER_COHORT, conn)
# print(len(user_cohorts))
| [
"willpowe@gmail.com"
] | willpowe@gmail.com |
d1f2f50c322a99b743288f97107bd32c7eaa939a | b2ba670818623f8ab18162382f7394baed97b7cb | /test-data/AndroidSlicer/Passwordmaker/DD/25.py | 26311ea9e3edcae3541f35a7bb9290be9c8cf9c2 | [
"MIT"
] | permissive | hsumyatwin/ESDroid-artifact | 012c26c40537a79b255da033e7b36d78086b743a | bff082c4daeeed62ceda3d715c07643203a0b44b | refs/heads/main | 2023-04-11T19:17:33.711133 | 2022-09-30T13:40:23 | 2022-09-30T13:40:23 | 303,378,286 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,111 | py | #start monkey test seedNo 0
import os;
from subprocess import Popen
from subprocess import PIPE
from com.android.monkeyrunner import MonkeyRunner, MonkeyDevice, MonkeyImage
from com.android.monkeyrunner.MonkeyDevice import takeSnapshot
from com.android.monkeyrunner.easy import EasyMonkeyDevice
from com.android.monkeyrunner.easy import By
from com.android.chimpchat.hierarchyviewer import HierarchyViewer
from com.android.monkeyrunner import MonkeyView
import random
import sys
import subprocess
from sys import exit
from random import randint
device = MonkeyRunner.waitForConnection()
package = 'org.passwordmaker.android'
activity ='org.passwordmaker.android.PasswordMakerProForAndroidActivity'
runComponent = package+'/'+activity
device.startActivity(component=runComponent)
MonkeyRunner.sleep(0.3)
MonkeyRunner.sleep(0.3)
device.touch(41,980, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.3)
device.touch(170,1176, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.3)
device.touch(960,1525, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.3)
device.touch(247,1848, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.3)
device.touch(244,1859, 'DOWN_AND_UP')
| [
"hsumyatwin@gmail.com"
] | hsumyatwin@gmail.com |
078016a98cfc4fe6f2ac01d5479dd26519279e0e | 4b41a76c5c366ba2daa30843acea16609b8f5da7 | /2017/15/AoC17_15_1.py | 31b59fcbdaeb1644927b75ea06ab82ab44a0bfc7 | [] | no_license | grandfoosier/AdventOfCode | c4706cfefef61e80060cca89b0433636e42bf974 | a43fdd72fe4279196252f24a4894500a4e272a5d | refs/heads/master | 2020-06-11T12:36:48.699811 | 2019-01-14T23:44:44 | 2019-01-14T23:44:44 | 75,665,958 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 811 | py | class Generator(object):
def __init__(self, M, D):
self.M = M; self.D = D
def seed(self, N): self.N = N
def out(self):
self.N = (self.N * self.M) % self.D
return ("%08x" % self.N)[-4:]
d = 2147483647
A = Generator(16807, d, 4); B = Generator(48271, d, 8)
# A.seed(65); B.seed(8921)
A.seed(722); B.seed(354)
def comp_gens():
a = A.out(); b = B.out()
return a == b
def comp_for_i_print(i):
c = 0; x = 0
while True:
print "\rGenerating pairs: %i%%" % (x * 100 / i), c,
for j in range(i/100):
if x == i: return c
c += comp_gens()
x += 1
def comp_for_i(i):
c = 0
for j in range(i): c += comp_gens()
return c
print ""
c = comp_for_i_print(40000000)
print "\rGenerating pairs: 100%", c
print "\n"
| [
"noreply@github.com"
] | grandfoosier.noreply@github.com |
d37664ea85b89e9902d716dd0654767e12ad40b5 | 551b75f52d28c0b5c8944d808a361470e2602654 | /huaweicloud-sdk-rms/huaweicloudsdkrms/v1/model/show_resource_history_response.py | 5057cb623b64b131ade2ec4ffd4d0c934449eea7 | [
"Apache-2.0"
] | permissive | wuchen-huawei/huaweicloud-sdk-python-v3 | 9d6597ce8ab666a9a297b3d936aeb85c55cf5877 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | refs/heads/master | 2023-05-08T21:32:31.920300 | 2021-05-26T08:54:18 | 2021-05-26T08:54:18 | 370,898,764 | 0 | 0 | NOASSERTION | 2021-05-26T03:50:07 | 2021-05-26T03:50:07 | null | UTF-8 | Python | false | false | 3,646 | py | # coding: utf-8
import pprint
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class ShowResourceHistoryResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'items': 'list[HistoryItem]',
'page_info': 'PageInfo'
}
attribute_map = {
'items': 'items',
'page_info': 'page_info'
}
def __init__(self, items=None, page_info=None):
"""ShowResourceHistoryResponse - a model defined in huaweicloud sdk"""
super(ShowResourceHistoryResponse, self).__init__()
self._items = None
self._page_info = None
self.discriminator = None
if items is not None:
self.items = items
if page_info is not None:
self.page_info = page_info
@property
def items(self):
"""Gets the items of this ShowResourceHistoryResponse.
资源历史列表
:return: The items of this ShowResourceHistoryResponse.
:rtype: list[HistoryItem]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this ShowResourceHistoryResponse.
资源历史列表
:param items: The items of this ShowResourceHistoryResponse.
:type: list[HistoryItem]
"""
self._items = items
@property
def page_info(self):
"""Gets the page_info of this ShowResourceHistoryResponse.
:return: The page_info of this ShowResourceHistoryResponse.
:rtype: PageInfo
"""
return self._page_info
@page_info.setter
def page_info(self, page_info):
"""Sets the page_info of this ShowResourceHistoryResponse.
:param page_info: The page_info of this ShowResourceHistoryResponse.
:type: PageInfo
"""
self._page_info = page_info
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowResourceHistoryResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
3983855edf088530d31c48a6e45d3bfb4e166c98 | 61fd2d345b92a71c29e9be60e17332b7d6179013 | /EventManagement/migrations/0008_auto_20160926_1631.py | 2e5ff46325478bccab3a1570e883abc89c41ab37 | [] | no_license | willsion/Dtop | c3285e2ec616a229416935646b49c0552f68ba09 | 82d591253424086bded3dff9bb4b74a7b5e7c521 | refs/heads/master | 2021-01-13T07:29:14.377572 | 2016-10-11T01:46:24 | 2016-10-11T01:46:24 | 71,310,645 | 1 | 0 | null | 2016-10-19T02:26:23 | 2016-10-19T02:26:23 | null | UTF-8 | Python | false | false | 641 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-09-26 08:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('EventManagement', '0007_auto_20160926_1535'),
]
operations = [
migrations.RemoveField(
model_name='items',
name='edit_time',
),
migrations.AddField(
model_name='items',
name='finish_time',
field=models.CharField(blank=True, max_length=64, null=True, verbose_name='\u4e8b\u9879\u7ed3\u675f\u65f6\u95f4'),
),
]
| [
"mzpy_1119@126.com"
] | mzpy_1119@126.com |
1b32904179892326050938867dbde6d0cb669258 | 09ae3f372d1000f118ad80874870ae420a4be66f | /scikit-learn-master/sklearn/preprocessing/base.py | 29a1bd87dc8ee0392d87294682d98a5feb6e4319 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | lqkweb/learnMLflow | 998f80c3828879b8d542125bc95c6345b8e9b29a | 13c5decaebba95b1b90f92021be35e343b4764af | refs/heads/master | 2022-10-18T06:17:23.584172 | 2019-01-18T09:51:38 | 2019-01-18T09:51:38 | 166,145,472 | 2 | 0 | Apache-2.0 | 2022-09-30T18:26:17 | 2019-01-17T02:22:29 | Python | UTF-8 | Python | false | false | 2,936 | py | """Helpers for preprocessing"""
import numpy as np
from scipy import sparse
from ..utils import check_array
from ..utils.validation import FLOAT_DTYPES
def _transform_selected(X, transform, dtype, selected="all", copy=True,
retain_order=False):
"""Apply a transform function to portion of selected features.
Returns an array Xt, where the non-selected features appear on the right
side (largest column indices) of Xt.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
dtype : number type
Desired dtype of output.
copy : boolean, default=True
Copy X even if it could be avoided.
selected : "all" or array of indices or mask
Specify which features to apply the transform to.
retain_order : boolean, default=False
If True, the non-selected features will not be displaced to the right
side of the transformed array. The number of features in Xt must
match the number of features in X. Furthermore, X and Xt cannot be
sparse.
Returns
-------
Xt : array or sparse matrix, shape=(n_samples, n_features_new)
"""
X = check_array(X, accept_sparse='csc', copy=copy, dtype=FLOAT_DTYPES)
if sparse.issparse(X) and retain_order:
raise ValueError("The retain_order option can only be set to True "
"for dense matrices.")
if isinstance(selected, str) and selected == "all":
return transform(X)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
# The columns of X which are not transformed need
# to be casted to the desire dtype before concatenation.
# Otherwise, the stacking will cast to the higher-precision dtype.
X_not_sel = X[:, ind[not_sel]].astype(dtype)
if retain_order:
if X_sel.shape[1] + X_not_sel.shape[1] != n_features:
raise ValueError("The retain_order option can only be set to True "
"if the dimensions of the input array match the "
"dimensions of the transformed array.")
# Fancy indexing not supported for sparse matrices
X[:, ind[sel]] = X_sel
return X
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
| [
"leiqk@dxy.cn"
] | leiqk@dxy.cn |
9fc1a0478cc1926799cda20f1e638da6df34df60 | a3a3e1298db9555eda37f8da0c74a437d897cb1f | /compiled/Python2/Euler_Problem-044.py | 001e401c08677505fac6ef8c513ff6e566121155 | [
"MIT"
] | permissive | LStepanek/Project-Euler_Befunge | 58f52254ee039ef6a5204fc65e62426c5e9d473a | f35fb2adecd737e410dee7b89b456cd61b25ce78 | refs/heads/master | 2021-01-01T17:51:52.413415 | 2017-05-03T17:23:01 | 2017-05-03T17:26:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,090 | py | #!/usr/bin/env python2
# transpiled with BefunCompile v1.1.0 (c) 2015
import sys
def td(a,b):
return ((0)if(b==0)else(a//b))
def tm(a,b):
return ((0)if(b==0)else(a%b))
s=[]
def sp():
global s
if (len(s) == 0):
return 0
return s.pop()
def sa(v):
global s
s.append(v)
def sr():
global s
if (len(s) == 0):
return 0
return s[-1]
x0=1073741824
x1=2
x2=37
x3=37
x4=37
x5=5
x6=37
def _0():
sa(2)
sa(5)
return 1
def _1():
sa(x1-1)
sa(x1-1)
return 2
def _2():
return (4)if(sp()!=0)else(3)
def _3():
global x1
global t0
global x5
sp();
sp();
sa(sp()+1);
sa(sr());
x1=sr()
t0=(sr()*3)-1
sa(sp()*t0);
sa(td(sp(),2))
x5=sr()
return 1
def _4():
global x2
global t0
global t1
global x3
global x6
global x4
global x0
x2=sr()
t0=(sr()*3)-1
sa(sp()*t0);
t1=sp()
t1=td(t1,2)
x3=t1
x6=0
sa(sp()-t1);
sa(sp()*24);
sa(sp()+1);
x4=sr()
sa(x0)
sa((1)if(x0>x4)else(0))
return 5
def _5():
return (24)if(sp()!=0)else(6)
def _6():
sa(sr());
return (21)if(sp()!=0)else(7)
def _7():
global t0
global x6
sp();
sa(sp()-(x6*x6));
t0=x6
return (16)if(sp()!=0)else(8)
def _8():
global t0
t0=tm(t0,6)
t0=t0-5
t0=(0)if(t0!=0)else(1)
return (9)if((t0)!=0)else(16)
def _9():
global t0
global x6
global x4
global x0
sa(((x3+x5)*24)+1)
t0=((x3+x5)*24)+1
x6=0
x4=t0
sa(x0)
sa((1)if(x0>x4)else(0))
return 10
def _10():
return (20)if(sp()!=0)else(11)
def _11():
sa(sr());
return 12
def _12():
return (17)if(sp()!=0)else(13)
def _13():
global t0
global x6
sp();
sa(sp()-(x6*x6));
t0=x6
return (16)if(sp()!=0)else(14)
def _14():
global t0
t0=tm(t0,6)
t0=t0-5
return (16)if((t0)!=0)else(15)
def _15():
sys.stdout.write(str(x5-x3))
sys.stdout.flush()
sp();
return 25
def _16():
global x5
sa(x5)
sa(x2-1)
sa(x2-1)
return 2
def _17():
return (19)if((sr()+x6)<=x4)else(18)
def _18():
global x6
x6=td(x6,2)
sa(td(sp(),4))
sa(sr());
return 12
def _19():
global t0
global t1
global x4
global t2
global x6
t0=sr()+x6
t1=x4
t2=t1-t0
x4=t2
t0=(sr()*2)+x6
x6=t0
x6=td(x6,2)
sa(td(sp(),4))
return 11
def _20():
sa(td(sp(),4))
sa((1)if(sr()>x4)else(0))
return 10
def _21():
return (23)if((sr()+x6)<=x4)else(22)
def _22():
global x6
x6=td(x6,2)
sa(td(sp(),4))
sa(sr());
return (21)if(sp()!=0)else(7)
def _23():
global t0
global t1
global x4
global t2
global x6
t0=sr()+x6
t1=x4
t2=t1-t0
x4=t2
t0=(sr()*2)+x6
x6=t0
x6=td(x6,2)
sa(td(sp(),4))
return 6
def _24():
sa(td(sp(),4))
sa((1)if(sr()>x4)else(0))
return 5
m=[_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_10,_11,_12,_13,_14,_15,_16,_17,_18,_19,_20,_21,_22,_23,_24]
c=0
while c<25:
c=m[c]()
| [
"mailport@mikescher.de"
] | mailport@mikescher.de |
55bc0100f96dccaae8b2b4afa737bd6e2b6f802d | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02975/s867029117.py | add0ced25f576970a10a4f0ceff6408262fcb1fd | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 422 | py | from collections import Counter
n = int(input())
a = [int(x) for x in input().split()]
d = Counter()
for x in a:
d[x] += 1
if d[0] == n:
print('Yes')
elif len(d.keys()) == 2 and d[0] == n//3:
print('Yes' if d[0] == n//3 else 'No')
elif len(d.keys()) == 3:
p, q, r = d.keys()
if d[p] == d[q] and d[q] == d[r] and p ^ q ^ r == 0:
print('Yes')
else:
print('No')
else:
print('No') | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
049f96a1399aa3f4596c8a725a4a7465aad57cd5 | 7bededcada9271d92f34da6dae7088f3faf61c02 | /pypureclient/flasharray/FA_2_24/models/pod_performance_by_array_get_response.py | 2ea4555dc4aa8a9849341dac5c9556006f6e67f9 | [
"BSD-2-Clause"
] | permissive | PureStorage-OpenConnect/py-pure-client | a5348c6a153f8c809d6e3cf734d95d6946c5f659 | 7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e | refs/heads/master | 2023-09-04T10:59:03.009972 | 2023-08-25T07:40:41 | 2023-08-25T07:40:41 | 160,391,444 | 18 | 29 | BSD-2-Clause | 2023-09-08T09:08:30 | 2018-12-04T17:02:51 | Python | UTF-8 | Python | false | false | 6,111 | py | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.24
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_24 import models
class PodPerformanceByArrayGetResponse(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'more_items_remaining': 'bool',
'total_item_count': 'int',
'continuation_token': 'str',
'items': 'list[PodPerformanceByArray]',
'total': 'list[PodPerformanceByArray]'
}
attribute_map = {
'more_items_remaining': 'more_items_remaining',
'total_item_count': 'total_item_count',
'continuation_token': 'continuation_token',
'items': 'items',
'total': 'total'
}
required_args = {
}
def __init__(
self,
more_items_remaining=None, # type: bool
total_item_count=None, # type: int
continuation_token=None, # type: str
items=None, # type: List[models.PodPerformanceByArray]
total=None, # type: List[models.PodPerformanceByArray]
):
"""
Keyword args:
more_items_remaining (bool): Returns a value of `true` if subsequent items can be retrieved.
total_item_count (int): The total number of records after applying all filter query parameters. The `total_item_count` will be calculated if and only if the corresponding query parameter `total_item_count` is set to `true`. If this query parameter is not set or set to `false`, a value of `null` will be returned.
continuation_token (str): Continuation token that can be provided in the `continuation_token` query param to get the next page of data. If you use the continuation token to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The continuation token is generated if the limit is less than the remaining number of items, and the default sort is used (no sort is specified).
items (list[PodPerformanceByArray]): Performance data, broken down by array. If `total_only=true`, the `items` list will be empty.
total (list[PodPerformanceByArray]): The aggregate value of all items after filtering. When applicable, the average value is displayed instead. If applicable, the values are displayed for each field.
"""
if more_items_remaining is not None:
self.more_items_remaining = more_items_remaining
if total_item_count is not None:
self.total_item_count = total_item_count
if continuation_token is not None:
self.continuation_token = continuation_token
if items is not None:
self.items = items
if total is not None:
self.total = total
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `PodPerformanceByArrayGetResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def __getitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `PodPerformanceByArrayGetResponse`".format(key))
return object.__getattribute__(self, key)
def __setitem__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `PodPerformanceByArrayGetResponse`".format(key))
object.__setattr__(self, key, value)
def __delitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `PodPerformanceByArrayGetResponse`".format(key))
object.__delattr__(self, key)
def keys(self):
return self.attribute_map.keys()
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(PodPerformanceByArrayGetResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PodPerformanceByArrayGetResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"noreply@github.com"
] | PureStorage-OpenConnect.noreply@github.com |
f4686b28ce278160234b7d3422bc31b22162de57 | 7522752dcce57932287aea367c3f37c596897056 | /python/checkio/ghosts_age.py | 47c27c69d34a6afa01588f86a4afa55ba8e34f65 | [] | no_license | gsrr/Programs | e8a63fcab6a49acdccbcb4d10f8e6c7f2de62288 | 52925dd533a1f931e81fa0b1e7deb237d284d7c4 | refs/heads/master | 2021-01-23T09:33:25.410211 | 2016-02-10T03:14:26 | 2016-02-10T03:14:26 | 13,143,203 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,188 | py |
# -*- coding: utf-8 -*-
#從例子看起來就是:
'''
若該number是Fibonacci number, 就拿前一數扣掉目前的number做為age
若不是的話, 就拿前一數加1做為age.
'''
import time
FibonArr = []
def createFibonArr(n):
global FibonArr
base = 10000
FibonArr.append(10000)
FibonArr.append(9999)
FibonArr.append(9997)
preFibon = 1
curFibon = 2
for i in range(3, n):
#print i
#time.sleep(1)
if i == preFibon + curFibon:
FibonArr.append(FibonArr[i-1] - i)
preFibon = curFibon
curFibon = i
else:
FibonArr.append(FibonArr[i-1] + 1)
#print FibonArr
def checkio(opacity):
global FibonArr
print FibonArr.index(opacity)
return FibonArr.index(opacity)
return 0
#These "asserts" using only for self-checking and not necessary for auto-testing
if __name__ == '__main__':
createFibonArr(5001)
assert checkio(10000) == 0, "Newborn"
assert checkio(9999) == 1, "1 year"
assert checkio(9997) == 2, "2 years"
assert checkio(9994) == 3, "3 years"
assert checkio(9995) == 4, "4 years"
assert checkio(9990) == 5, "5 years"
| [
"jerrycheng1128@gmail.com"
] | jerrycheng1128@gmail.com |
c56ff775adfc2926100de1ced9e14e9dc8d9c225 | 36957a9ce540846d08f151b6a2c2d582cff1df47 | /VR/Python/Python36/Lib/test/test_print.py | a7ba39302da6cf9e3c1a11d33abf02ed3038a11f | [] | no_license | aqp1234/gitVR | 60fc952307ef413e396d31e0d136faffe087ed2b | e70bd82c451943c2966b8ad1bee620a0ee1080d2 | refs/heads/master | 2022-12-29T15:30:12.540947 | 2020-10-07T15:26:32 | 2020-10-07T15:26:32 | 290,163,043 | 0 | 1 | null | 2020-08-25T09:15:40 | 2020-08-25T08:47:36 | C# | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:05aa6cec2a4f3e10a764ff415f8d632be612ebae69d92ce150a6d1bc9de99c37
size 7766
| [
"aqp1234@naver.com"
] | aqp1234@naver.com |
d5fba800e7a84c1b08eef34db3e80dd12c422fa1 | c896b09d7172cc8560dbd7fe3388b7e19143fbd4 | /tests/single_shape/test_data_processing.py | 3c6cc391937d1a7a5b56d63dc360947764852765 | [
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-commercial-license",
"AGPL-3.0-or-later",
"AGPL-3.0-only"
] | permissive | justusschock/shapedata | e6c24ad63c04fa3ff29e050ca3e17ad09c077666 | cfe288454631dd4e59242081c52e4297fecce927 | refs/heads/master | 2020-04-19T18:13:37.587500 | 2020-02-05T14:14:38 | 2020-02-05T14:14:38 | 168,357,536 | 8 | 1 | BSD-2-Clause | 2019-05-16T11:07:11 | 2019-01-30T14:35:35 | Python | UTF-8 | Python | false | false | 704 | py | from shapedata.single_shape import SingleShapeDataProcessing
import os
import warnings
def test_data_processing():
data = SingleShapeDataProcessing.from_dir(os.path.join(os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))),
"example_files"))
assert data.samples
data.resize((224, 224))
assert data[0].img.shape[: -1] == (224, 224)
assert data.images
assert data.landmarks
assert data.lmk_pca(True, True).shape == (2, 68, 2)
data[0] = 5
assert data[0] == 5
try:
data[1] = 500
assert False, "Should raise IndexError since index 1 is out\
of bound"
except IndexError:
assert True | [
"justus.schock@rwth-aachen.de"
] | justus.schock@rwth-aachen.de |
2475af52d07cc6493332273284b22c5a30fe6cc6 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02985/s061514161.py | c1088dc64c0c9162493c27df1410724030b3b86a | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 703 | py | from collections import deque
mod = 10**9 + 7
n, k = map(int, input().split())
edges = [[]*n for _ in range(n)]
for _ in range(n-1):
a, b = map(int, input().split())
a -= 1
b -= 1
edges[a].append(b)
edges[b].append(a)
root = 0
d = deque([root])
visited = [False]*n
visited[root] = True
colored = [-1]*n
colored[root] = k
while d:
p = d.pop()
count = 0
for c in edges[p]:
if visited[c]:
continue
visited[c] = True
d.append(c)
if p == root:
colored[c] = (k-1-count) % mod
else:
colored[c] = (k-2-count) % mod
count += 1
ans = 1
for i in range(n):
ans = ans*colored[i] % mod
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
091be7cc8f11ee0ca9b192cce731691083899e48 | 122306a7e284774ac80c6fc381e4db2f26ea0c30 | /bin/deploy/model_copy.py | c1f50ee84cdd54ca5dcb35088c86bf40e4256902 | [
"BSD-3-Clause"
] | permissive | e-mission/e-mission-server | 4da028e34eaac32c0d27ec929f68b23905e6ca1e | 94e7478e627fa8c171323662f951c611c0993031 | refs/heads/master | 2023-09-01T06:53:33.926971 | 2023-08-15T23:39:00 | 2023-08-15T23:39:00 | 26,307,245 | 26 | 131 | BSD-3-Clause | 2023-09-14T05:08:59 | 2014-11-07T06:39:02 | Jupyter Notebook | UTF-8 | Python | false | false | 240 | py | import json
sample_path = "emission/tests//data/seed_model_from_test_data.json"
f = open(sample_path, "r")
data = json.loads(f.read())
f.close()
real_path = "./seed_model.json"
f = open(real_path, "w")
f.write(json.dumps(data))
f.close()
| [
"shankari@eecs.berkeley.edu"
] | shankari@eecs.berkeley.edu |
5a86546730bf97f9abd3dcacd1dc5764af53d7b3 | c96700961f09bbac141858d98141428d643322e8 | /homeassistant/components/aemet/__init__.py | 54c93f43a25ae9f33e0dff98a2cb6c2cbc019b85 | [
"Apache-2.0"
] | permissive | DerMetzger69/core | b3b6f30535f2e607e08dd6544e130b452f44c3a1 | 02a82d3f00c610f94d3366cc34540bdfa94a2c8e | refs/heads/dev | 2023-03-18T10:42:52.605222 | 2021-03-13T09:53:26 | 2021-03-13T09:53:26 | 345,092,595 | 1 | 0 | Apache-2.0 | 2021-03-06T13:32:49 | 2021-03-06T12:49:54 | null | UTF-8 | Python | false | false | 1,859 | py | """The AEMET OpenData component."""
import asyncio
import logging
from aemet_opendata.interface import AEMET
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_API_KEY, CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME
from homeassistant.core import HomeAssistant
from .const import DOMAIN, ENTRY_NAME, ENTRY_WEATHER_COORDINATOR, PLATFORMS
from .weather_update_coordinator import WeatherUpdateCoordinator
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass: HomeAssistant, config: dict) -> bool:
    """Set up the AEMET OpenData component (ensure its storage bucket)."""
    if DOMAIN not in hass.data:
        hass.data[DOMAIN] = {}
    return True
async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry):
    """Set up AEMET OpenData as config entry."""
    entry_data = config_entry.data
    name = entry_data[CONF_NAME]
    api_key = entry_data[CONF_API_KEY]
    latitude = entry_data[CONF_LATITUDE]
    longitude = entry_data[CONF_LONGITUDE]

    # Build the coordinator around a fresh AEMET client and prime it once.
    weather_coordinator = WeatherUpdateCoordinator(
        hass, AEMET(api_key), latitude, longitude
    )
    await weather_coordinator.async_refresh()

    hass.data[DOMAIN][config_entry.entry_id] = {
        ENTRY_NAME: name,
        ENTRY_WEATHER_COORDINATOR: weather_coordinator,
    }

    # Forward the entry to every supported platform without blocking setup.
    for platform in PLATFORMS:
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(config_entry, platform)
        )
    return True
async def async_unload_entry(hass: HomeAssistant, config_entry: ConfigEntry):
    """Unload a config entry."""
    # Unload every platform concurrently; the entry counts as unloaded
    # only when all of them report success.
    unload_tasks = [
        hass.config_entries.async_forward_entry_unload(config_entry, platform)
        for platform in PLATFORMS
    ]
    unload_ok = all(await asyncio.gather(*unload_tasks))
    if unload_ok:
        hass.data[DOMAIN].pop(config_entry.entry_id)
    return unload_ok
| [
"noreply@github.com"
] | DerMetzger69.noreply@github.com |
7bf7695ca1582200479cb2074f9c31f8a22a686f | 6ffa236a008d1cd1dc70f2c8ea0544d20ec350ee | /aries_cloudagent/messaging/actionmenu/util.py | a560ad5789542949e6bfc705400e4d1cc1877517 | [
"Apache-2.0"
] | permissive | blockpass-identity-lab/aries-fl-demo | 99e849f782dd80e729e3fe24c3af2881c5c49dca | 310b748c1ac2e814ec6f97c46ddbb9985584e0fc | refs/heads/master | 2022-07-06T18:37:16.007582 | 2020-04-23T15:48:33 | 2020-04-23T15:48:33 | 221,698,330 | 5 | 0 | Apache-2.0 | 2021-02-26T02:40:03 | 2019-11-14T12:58:58 | Python | UTF-8 | Python | false | false | 1,853 | py | """Action menu utility methods."""
from ...config.injection_context import InjectionContext
from ...storage.base import BaseStorage, StorageRecord, StorageNotFoundError
from ..responder import BaseResponder
from .messages.menu import Menu
MENU_RECORD_TYPE = "connection-action-menu"
async def retrieve_connection_menu(
    connection_id: str, context: InjectionContext
) -> Menu:
    """Fetch the action menu previously stored for a connection, or None."""
    storage: BaseStorage = await context.inject(BaseStorage)
    menu = None
    try:
        record = await storage.search_records(
            MENU_RECORD_TYPE, {"connection_id": connection_id}
        ).fetch_single()
    except StorageNotFoundError:
        # No stored menu for this connection.
        pass
    else:
        if record:
            menu = Menu.from_json(record.value)
    return menu
async def save_connection_menu(
    menu: Menu, connection_id: str, context: InjectionContext
):
    """Persist (or remove) the action menu received on a connection.

    A truthy ``menu`` is inserted or updated; a falsy one removes any
    stored menu.  When a responder is available an "actionmenu" webhook
    is emitted afterwards with the serialized menu (or None).
    """
    storage: BaseStorage = await context.inject(BaseStorage)
    found = True
    try:
        record = await storage.search_records(
            MENU_RECORD_TYPE, {"connection_id": connection_id}
        ).fetch_single()
    except StorageNotFoundError:
        found = False

    if not found:
        if menu:
            new_record = StorageRecord(
                type=MENU_RECORD_TYPE,
                value=menu.to_json(),
                tags={"connection_id": connection_id},
            )
            await storage.add_record(new_record)
    elif menu:
        await storage.update_record_value(record, menu.to_json())
    else:
        await storage.delete_record(record)

    responder: BaseResponder = await context.inject(BaseResponder, required=False)
    if responder:
        await responder.send_webhook(
            "actionmenu",
            {
                "connection_id": connection_id,
                "menu": menu.serialize() if menu else None,
            },
        )
| [
"cywolf@gmail.com"
] | cywolf@gmail.com |
823a02d696d223f8850a813472cad71fecda108a | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-gsn-edf/gsn-edf_ut=3.0_rd=1_rw=0.06_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=98/sched.py | 2eaba81adb1dd9db25f2adaa5406d92064c54555 | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | -X FMLP -Q 0 -L 2 64 250
-X FMLP -Q 0 -L 2 58 175
-X FMLP -Q 0 -L 2 46 300
-X FMLP -Q 0 -L 2 44 200
-X FMLP -Q 1 -L 2 43 175
-X FMLP -Q 1 -L 2 42 150
-X FMLP -Q 1 -L 2 41 125
-X FMLP -Q 2 -L 1 33 125
-X FMLP -Q 2 -L 1 32 100
-X FMLP -Q 2 -L 1 29 100
-X FMLP -Q 3 -L 1 23 175
-X FMLP -Q 3 -L 1 10 125
-X FMLP -Q 3 -L 1 10 100
| [
"ricardo.btxr@gmail.com"
] | ricardo.btxr@gmail.com |
6852be2525e6c6d38e80b76604bca27eabdfbd05 | b7086d5e907aaf983af5b8d7d6f74c4fc6e40f23 | /DarkZ/StatTools/YieldProducer.py | ed7ee4a9a243cc887b72e88431b5e626ad33c41c | [] | no_license | ahmad3213/PyNTupleProcessor | da40c596f275406f21e83e117c5b8020d6ee309c | c84fa597b132e91342226b12a74213f675c0b125 | refs/heads/master | 2023-03-29T18:59:19.454585 | 2020-09-21T21:52:25 | 2020-09-21T21:52:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,303 | py | from Core.Module import Module
class YieldProducer(Module):
    """Books one single-bin counting histogram per (mass window, channel)
    and fills it for events whose Z2 mass falls inside the window;
    systematic variations get their own reweighted histograms."""
    def __init__(self,name,mass_window_list,systList=None):
        self.name = name
        self.mass_window_list = mass_window_list
        # Bug fix: the original default `systList=[]` is a mutable default
        # argument, shared by every instance constructed without an
        # explicit list; use None as the sentinel and build a fresh list.
        self.systList = systList if systList is not None else []
        #self.channelNames = ["4mu","4e","2e2mu","comb"]
        self.channelNames = ["4mu","4e","2e2mu","2mu2e","comb"]
    def begin(self):
        # Book the nominal and per-systematic histograms.
        # NOTE(review): self.writer is expected to be attached by the
        # framework before begin() runs - confirm.
        for mWindow in self.mass_window_list:
            for channelName in self.channelNames:
                #histName = "_".join([mWindow.makeHistName(),self.dataset.parent.name,channelName,])
                histName = "_".join([mWindow.makeHistName(),channelName,])
                self.writer.book(histName,"TH1D",histName,"",1,-0.5,0.5)
                for syst in self.systList:
                    #sysHistName = "_".join([mWindow.makeHistName(),self.dataset.parent.name,channelName,syst.name])
                    sysHistName = "_".join([mWindow.makeHistName(),channelName,syst.name])
                    self.writer.book(sysHistName,"TH1D",sysHistName,"",1,-0.5,0.5)
    def analyze(self,event):
        # Fill the combined ("comb") and the matching channel histogram for
        # each window that both selects the event and contains massZ2[0];
        # systematic fills are reweighted by syst.factorFunc(event).
        for multi_mWindow in self.mass_window_list:
            for mWindow in multi_mWindow:
                if not mWindow.selection(event): continue
                if mWindow.inWindow(event.massZ2[0]):
                    histName = "_".join([multi_mWindow.makeHistName(),"comb",])
                    self.writer.objs[histName].Fill(0.,event.weight)
                    channelName = mWindow.name
                    histName = "_".join([multi_mWindow.makeHistName(),channelName,])
                    self.writer.objs[histName].Fill(0.,event.weight)
                    for syst in self.systList:
                        sysHistName = "_".join([multi_mWindow.makeHistName(),"comb",syst.name])
                        self.writer.objs[sysHistName].Fill(0.,event.weight*syst.factorFunc(event))
                        sysHistName = "_".join([multi_mWindow.makeHistName(),channelName,syst.name])
                        self.writer.objs[sysHistName].Fill(0.,event.weight*syst.factorFunc(event))
        return True
    def end(self):
        # Give each systematic weight function and each window a chance to
        # finalize its state.
        for syst in self.systList:
            syst.factorFunc.end()
        for multi_mWindow in self.mass_window_list:
            multi_mWindow.end()
| [
"lucien1011@gmail.com"
] | lucien1011@gmail.com |
2729ebff5bce2d53aecd6157e359bbae4f59cafa | b15e5e8ca56996a34b2339b71a54b8b85724c950 | /代码/jieba分词技术举例.py | ac2488693b0942b3e8567ad842008a1230ab4e4d | [] | no_license | JMbaozi/Text-analysis | 51adffcc67ed4ea80bb1cbc4d8e6c129b2338d99 | e5394737c5119a7c9eff6201e1448bcedc3b1be0 | refs/heads/master | 2022-04-30T12:48:03.354418 | 2022-03-25T02:27:28 | 2022-03-25T02:27:28 | 217,422,637 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 7,929 | py | """
# encoding=utf-8
import jieba
seg_list = jieba.cut("三万六千五百零一块", cut_all=False)
print("Default Mode: " + "/ ".join(seg_list)) # 精确模式
"""
"""
# encoding=utf-8
import jieba
jieba.load_userdict("hlmdict.txt")
jieba.add_word('大荒山',freq = None,tag = None)
jieba.add_word('女娲氏',freq = None,tag = None)
jieba.add_word('无稽崖',freq = None,tag = None)
jieba.add_word('高经十二丈',freq = None,tag = None)
jieba.add_word('方经二十四丈',freq = None,tag = None)
seg_list = jieba.cut("原来女娲氏炼石补天之时,于大荒山无稽崖炼成高经十二丈、方经二十四丈顽石三万六千五百零一块。", cut_all=False)
print("Default Mode: " + "/ ".join(seg_list)) # 精确模式
"""
"""
import jieba
data = "原来女娲氏炼石补天之时,于大荒山无稽崖炼成高经十二丈、方经二十四丈顽石三万六千五百零一块。"
cut_result = jieba.cut(data)
print('==' * 20)
print('/'.join(cut_result))
jieba.load_userdict('hlmdicttest.txt')
cut_result = jieba.cut(data)
print('=='*20)
print('/'.join(cut_result))
"""
"""
import sys
sys.path.append('../')
import jieba
import jieba.analyse
from optparse import OptionParser
USAGE = "usage: python -extract_tags.py [file name] -k [top k]"
parser = OptionParser(USAGE)
parser.add_option("-k", dest="topK")
opt, args = parser.parse_args()
if len(args) < 1:
print(USAGE)
sys.exit(1)
file_name = args[0]
if opt.topK is None:
topK = 10
else:
topK = int(opt.topK)
content = open('_test.txt', 'rb').read()
tags = jieba.analyse.extract_tags(content, topK=topK)
print(",".join(tags))
"""
"""
import jieba
import jieba.analyse
import math
keywords1=jieba.analyse.extract_tags("当日地陷东南,这东南一隅有处曰姑苏,有城曰阊门者,最是红尘中一二等富贵风流之地。这阊门外有个十里街,街内有个仁清巷,巷内有个古庙,因地方窄狭,人皆呼作葫芦庙。庙旁住着一家乡宦,姓甄名费,字士隐。嫡妻封氏,情性贤淑,深明礼义。家中虽不甚富贵,然本地便也推他为望族了。只因这甄士隐禀性恬淡,不以功名为念,每日只以观花修竹、酌酒吟诗为乐,倒是神仙一流人品。只是一件不足:如今年已半百,膝下无儿,只有一女,乳名英莲,年方三岁。")
print('关键词提取'+"/".join(keywords1))
keywords_top=jieba.analyse.extract_tags("当日地陷东南,这东南一隅有处曰姑苏,有城曰阊门者,最是红尘中一二等富贵风流之地。这阊门外有个十里街,街内有个仁清巷,巷内有个古庙,因地方窄狭,人皆呼作葫芦庙。庙旁住着一家乡宦,姓甄名费,字士隐。嫡妻封氏,情性贤淑,深明礼义。家中虽不甚富贵,然本地便也推他为望族了。只因这甄士隐禀性恬淡,不以功名为念,每日只以观花修竹、酌酒吟诗为乐,倒是神仙一流人品。只是一件不足:如今年已半百,膝下无儿,只有一女,乳名英莲,年方三岁。",topK=3)
print('关键词topk'+"/".join(keywords_to#有时不确定提取多少关键词,可利用总词的百分比
print('总词数{}'.format(len(list(jieba.cut("当日地陷东南,这东南一隅有处曰姑苏,有城曰阊门者,最是红尘中一二等富贵风流之地。这阊门外有个十里街,街内有个仁清巷,巷内有个古庙,因地方窄狭,人皆呼作葫芦庙。庙旁住着一家乡宦,姓甄名费,字士隐。嫡妻封氏,情性贤淑,深明礼义。家中虽不甚富贵,然本地便也推他为望族了。只因这甄士隐禀性恬淡,不以功名为念,每日只以观花修竹、酌酒吟诗为乐,倒是神仙一流人品。只是一件不足:如今年已半百,膝下无儿,只有一女,乳名英莲,年方三岁。")))))
total=len(list(jieba.cut("当日地陷东南,这东南一隅有处曰姑苏,有城曰阊门者,最是红尘中一二等富贵风流之地。这阊门外有个十里街,街内有个仁清巷,巷内有个古庙,因地方窄狭,人皆呼作葫芦庙。庙旁住着一家乡宦,姓甄名费,字士隐。嫡妻封氏,情性贤淑,深明礼义。家中虽不甚富贵,然本地便也推他为望族了。只因这甄士隐禀性恬淡,不以功名为念,每日只以观花修竹、酌酒吟诗为乐,倒是神仙一流人品。只是一件不足:如今年已半百,膝下无儿,只有一女,乳名英莲,年方三岁。")))
get_cnt=math.ceil(total*0.1) #向上取整
print('从%d 中取出%d 个词'% (total,get_cnt))
keywords_top1=jieba.analyse.extract_tags("当日地陷东南,这东南一隅有处曰姑苏,有城曰阊门者,最是红尘中一二等富贵风流之地。这阊门外有个十里街,街内有个仁清巷,巷内有个古庙,因地方窄狭,人皆呼作葫芦庙。庙旁住着一家乡宦,姓甄名费,字士隐。嫡妻封氏,情性贤淑,深明礼义。家中虽不甚富贵,然本地便也推他为望族了。只因这甄士隐禀性恬淡,不以功名为念,每日只以观花修竹、酌酒吟诗为乐,倒是神仙一流人品。只是一件不足:如今年已半百,膝下无儿,只有一女,乳名英莲,年方三岁。",topK=get_cnt)
print('关键词topk'+"/".join(keywords_top1))''
"""
"""
import jieba.posseg as pseg
import jieba
#jieba.load_userdict("hlmdict.txt")
words = pseg.cut("当日地陷东南,这东南一隅有处曰姑苏,有城曰阊门者,最是红尘中一二等富贵风流之地。")
for word, flag in words:
print('%s %s' % (word, flag))
"""
"""
import sys
sys.path.append('../')
import jieba
import jieba.analyse
from optparse import OptionParser
USAGE = "python extract_tags_with_weight.py E:\文本分析学习\_test.txt -k [top k] -w [with weight=1 or 0]"
parser = OptionParser(USAGE)
parser.add_option("-k", dest="topK")
opt, args = parser.parse_args()
if len(args) < 1:
print(USAGE)
sys.exit(1)
file_name = args[0]
if opt.topK is None:
topK = 10
else:
topK = int(opt.topK)
content = open(file_name, 'rb').read()
tags = jieba.analyse.extract_tags(content, topK=topK)
print(",".join(tags))
"""
"""
#jieba.analyse.extract_tags(string,topK=20,withWeight=True,allowPOS=(" "))
#string:待处理语句
#topK:返回TF、IDF权重最大的关键字的个数,默认20
#withWeight:是否返回权重值,默认false
#allowPOS:是否仅返回指定类型,默认为空
import jieba.analyse
# 字符串前面加u表示使用unicode编码
content = u'_text'
keywords = jieba.analyse.extract_tags(content, topK=20, withWeight=True, allowPOS=())
# 访问提取结果
for item in keywords:
print(item[0], item[1])
"""
"""
from jieba.analyse import *
with open('liulaolao.txt',encoding = 'utf-8') as f:
data = f.read()
for keyword, weight in extract_tags(data, topK=10, withWeight=True):
print('%s %s' % (keyword, weight))
"""
"""
from jieba.analyse import *
import jieba
stopwords = ['奶奶','那里','什么','姑娘','说道','众人','如今','一面','你们','袭人','只见']
with open('hongloumeng.txt',encoding = 'utf-8') as f:
data = f.read()
for keyword, weight in extract_tags(data, topK=21, withWeight=True):
tag = 1
for i in range(11):
if keyword == stopwords[i]:
tag = 0
if tag == 1:
print('%s %s' % (keyword,weight))
"""
#刘姥姥进大观园
from jieba.analyse import *
import jieba
# Stop words (titles / pronouns from the novel) excluded from the output.
stopwords = ['奶奶','婆子','一个','姑娘','说道','众人','如今','一面','你们','丫鬟','只见']
with open('liulaolao.txt',encoding = 'utf-8') as f:
    data = f.read()
# Print the top-10 TF-IDF keywords that are not stop words.
# Fixed: the original scanned the list with a hard-coded range(11) index
# loop, which silently breaks when the stop-word list changes length;
# a membership test is equivalent and robust.
for keyword, weight in extract_tags(data, topK=10, withWeight=True):
    if keyword not in stopwords:
        print('%s %s' % (keyword,weight))
| [
"l"
] | l |
096fab96c391dc22a14229e0bdae002832334c52 | d21071464bef4f3fd51e554f280418d06975a77e | /leetcode/1153. String Transforms Into Another String.py | 2b5d7049fc62a91089fa9d0c87e999c56957315b | [] | no_license | DeshErBojhaa/sports_programming | ec106dcc24e96231d447cdcac494d76a94868b2d | 96e086d4ee6169c0f83fff3819f38f32b8f17c98 | refs/heads/master | 2021-06-13T19:43:40.782021 | 2021-03-27T14:21:49 | 2021-03-27T14:21:49 | 164,201,394 | 1 | 0 | null | 2019-08-27T22:21:26 | 2019-01-05T09:39:41 | C++ | UTF-8 | Python | false | false | 379 | py | # 1153. String Transforms Into Another String
class Solution:
    def canConvert(self, str1: str, str2: str) -> bool:
        """True if str1 can become str2 by repeatedly converting every
        occurrence of one character into another character."""
        if str1 == str2:
            return True
        mapping = {}
        for src, dst in zip(str1, str2):
            # Each source character must map to exactly one target.
            if mapping.setdefault(src, dst) != dst:
                return False
        # A spare (unused) letter in str2 is needed to break mapping cycles.
        return len(set(str2)) < 26
| [
"noreply@github.com"
] | DeshErBojhaa.noreply@github.com |
a12f06f63673f3aca5b1c00c6fe16a857adb8630 | 10b5adc3ef1821196e47967d2b89b2be82ea1f18 | /code/sort/choose_sort.py | 82181efaeffdde874249dbccfaca50fa739eb6e7 | [] | no_license | JiaXingBinggan/For_work | 4951ccb82f24c86404b1c090fd8394b765225c24 | 97cc61fefe0bedf5161687aab92fb09b0df990e2 | refs/heads/master | 2022-12-18T22:44:49.180313 | 2020-09-25T13:13:14 | 2020-09-25T13:13:14 | 282,564,994 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,155 | py | class Solution:
def chooseSort(self, unorderedList):
'''
选择排序,其只将最大项的位置记录下来,每一趟仅交换一次
:param unorderedList:
:return:
'''
for pass_num in range(len(unorderedList) - 1, -1, -1):
max_location = 0
for i in range(pass_num + 1):
if unorderedList[i] > unorderedList[max_location]:
max_location = i
unorderedList[pass_num], unorderedList[max_location] = unorderedList[max_location], unorderedList[pass_num]
return unorderedList
def chooseSort2(self, unorderedList):
for pass_num in range(len(unorderedList) - 1, -1, -1):
max_location = 0
for i in range(1, pass_num + 1):
if unorderedList[i] > unorderedList[max_location]:
max_location = i
unorderedList[pass_num], unorderedList[max_location] = unorderedList[max_location], unorderedList[pass_num]
return unorderedList
# Demo: both selection-sort variants sort the same sample in place.
s = Solution()
print(s.chooseSort([2, 4, 2, 6, 3, 8, 19, 12]))
print(s.chooseSort2([2, 4, 2, 6, 3, 8, 19, 12]))
"729869585@qq.com"
] | 729869585@qq.com |
1990846c598373f5731c4d0e25b329e74c9061b4 | 112c9c81d20b1fa6d4c46f2df1c2f352d8a2249f | /sdmx/exceptions.py | cae2a491f9209eb1a8fdc5e56befea2b14add06c | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | khaeru/sdmx | 910b1da621ef763f500fa8bb28c87cddadc0c9aa | 7a648fb07b78d06b766aa65978101526d393eb14 | refs/heads/main | 2023-08-17T03:24:25.527374 | 2023-08-16T12:16:53 | 2023-08-16T12:16:53 | 87,204,730 | 10 | 12 | Apache-2.0 | 2023-09-03T19:48:53 | 2017-04-04T15:38:21 | Python | UTF-8 | Python | false | false | 483 | py | from requests import HTTPError
from requests.exceptions import SSLError
__all__ = [
"HTTPError",
"ParseError",
"SSLError",
"XMLParseError",
]
class ParseError(Exception):
    """:class:`~.reader.Reader` is unable to parse a message.

    Base parse failure; see :class:`XMLParseError` for the XML reader's
    variant, which renders its ``__cause__`` in ``str()``.
    """
class XMLParseError(Exception):
    """:class:`.xml.Reader` is unable to parse a message."""

    def __str__(self):
        # Render as "<CauseClass>: <cause message>", dropping the colon
        # part when the cause stringifies to the empty string.
        cause_text = str(self.__cause__)
        label = self.__cause__.__class__.__name__
        if cause_text:
            return f"{label}: {cause_text}"
        return label
| [
"mail@paul.kishimoto.name"
] | mail@paul.kishimoto.name |
155d8ef6b398a34cdcd4e2a07a6c344d1727f792 | ef268924e126f127eadbc82a709acfdaeb505925 | /1403. Minimum Subsequence in Non-Increasing Order/1403.py | 74993af1d1e1ccc0cdcd451d5c3171a1155552a7 | [] | no_license | HappyStorm/LeetCode-OJ | e7dec57156b67886cbb57dc6401b2164eaed7476 | 8e742bc66a70900bbb7bc0bd740cec6092577a6a | refs/heads/master | 2022-08-30T11:40:57.658447 | 2022-08-09T16:59:49 | 2022-08-09T16:59:49 | 55,757,963 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 282 | py | class Solution:
def minSubsequence(self, nums: List[int]) -> List[int]:
ans, nums = [], sorted(nums)
cur, _sum = 0, sum(nums)
while cur <= _sum:
ans.append(nums.pop())
cur += ans[-1]
_sum -= ans[-1]
return ans
| [
"gogogo753951741963@gmail.com"
] | gogogo753951741963@gmail.com |
249f58c4e17538cc3ee9ad47ce65011cc2c09cf2 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/44a9b167bda9654ce60588cf2dcee88e4bad831d-<test_apply_attach_name>-bug.py | 2a31ccb713dbd4818506343e01aa576cc026ba9e | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 857 | py | def test_apply_attach_name(self):
    # apply over columns (axis=0): each column Series' .name is its label.
    result = self.frame.apply((lambda x: x.name))
    expected = Series(self.frame.columns, index=self.frame.columns)
    assert_series_equal(result, expected)
    # apply over rows (axis=1): .name is the row's index label.
    result = self.frame.apply((lambda x: x.name), axis=1)
    expected = Series(self.frame.index, index=self.frame.index)
    assert_series_equal(result, expected)
    # Returning an array per column broadcasts back into a DataFrame.
    result = self.frame.apply((lambda x: np.repeat(x.name, len(x))))
    expected = DataFrame(np.tile(self.frame.columns, (len(self.frame.index), 1)), index=self.frame.index, columns=self.frame.columns)
    assert_frame_equal(result, expected)
    # Row-wise variant: itertuples() yields the index label first (t[0]).
    # NOTE(review): this file comes from a bug-fixing corpus - the
    # expected Series is built without an index and patched afterwards;
    # confirm alignment semantics against the fixed version.
    result = self.frame.apply((lambda x: np.repeat(x.name, len(x))), axis=1)
    expected = Series((np.repeat(t[0], len(self.frame.columns)) for t in self.frame.itertuples()))
    expected.index = self.frame.index
    assert_series_equal(result, expected)
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
34b3c7ae001c8a0ccb57fc5fad68b772d2c3c744 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /b67PHXfgMwpD9rAeg_3.py | 300afd23a5128f525b83cd6f0ff7a5407d06d35c | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 618 | py | """
Create a function that takes a string as an argument and returns `True` if
each letter in the string is surrounded by a plus sign. Return `False`
otherwise.
### Examples
plus_sign("+f+d+c+#+f+") ➞ True
plus_sign("+d+=3=+s+") ➞ True
plus_sign("f++d+g+8+") ➞ False
plus_sign("+s+7+fg+r+8+") ➞ False
### Notes
For clarity, each **letter** must have a plus sign on both sides.
"""
def plus_sign(txt):
  """Return True when every letter in txt has a '+' immediately on both
  sides; non-letter characters need no padding.

  Bug fix: the original only scanned indices 1..len-2, so a letter at
  either end of the string was never checked - e.g. "f++d+g+8+" returned
  True although its own docstring example expects False.  A letter at an
  end can never be surrounded, so it now yields False; strings with no
  letters (including very short ones) are vacuously True.
  """
  for i, ch in enumerate(txt):
    if ch.isalpha():
      # Boundary letters cannot have a '+' on both sides.
      if i == 0 or i == len(txt) - 1:
        return False
      if txt[i-1] != '+' or txt[i+1] != '+':
        return False
  return True
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
900a52950a0e1774873ed52de10a6687243bb08b | 50b677eb72104d6282edfa8c45f55ef680b9a431 | /v1/kdm-manager.js | 0af0858aea25cc777cd6fbd5dec4ec0824bf8fc4 | [] | no_license | chummer5a/kdm-manager | 6a2cdf1b390c5dabdb3713ebf5358ea73c6d56d8 | 37c69a39241ebee1b4f5e8e9db05e695f2e03ea8 | refs/heads/master | 2021-01-10T23:03:17.754551 | 2016-10-10T16:40:44 | 2016-10-10T16:40:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | js | #!/usr/bin/env python
import sys
#
# This script renders a JS app file on demand
#
if __name__ == "__main__":
    # NOTE(review): Python 2 only - the `file()` builtin and the print
    # statement below were removed in Python 3.
    # The CGI header claims text/css although app.js is JavaScript;
    # presumably historical - confirm before changing.
    print("Content-type: text/css\n\n")
    raw_file = file("media/app.js", "rb").read()
    print raw_file
    sys.exit()
| [
"toconnell@tyrannybelle.com"
] | toconnell@tyrannybelle.com |
9c5ac66dc16f65e773072aa208fbf5f9eefb8166 | 31bc3fdc7c2b62880f84e50893c8e3d0dfb66fa6 | /language/python_369/python_369/built_in_types/sequence_/str_/create.py | 79fa70e126f01cf3e0ee5698213578c89350bf54 | [] | no_license | tpt5cu/python-tutorial | 6e25cf0b346b8182ebc8a921efb25db65f16c144 | 5998e86165a52889faf14133b5b0d7588d637be1 | refs/heads/master | 2022-11-28T16:58:51.648259 | 2020-07-23T02:20:37 | 2020-07-23T02:20:37 | 269,521,394 | 0 | 0 | null | 2020-06-05T03:23:51 | 2020-06-05T03:23:50 | null | UTF-8 | Python | false | false | 1,539 | py | # https://realpython.com/python-string-formatting/
# https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals
# https://docs.python.org/3/library/stdtypes.html#text-sequence-type-str
# https://stackoverflow.com/questions/27331819/whats-the-difference-between-a-character-a-code-point-a-glyph-and-a-grapheme
'''
Strings are immutable sequences of Unicode code points
- In Python 3, str is always the Unicode type
- Recall that a Unicode code point is simply a number that is given special meaning by Unicode
'''
def string_interpolation():
    '''
    Demonstrates f-strings (Python 3 only): arbitrary expressions and
    format specifiers can be embedded directly; str.format works too.
    '''
    name = 'Austin'
    seconds_per_year = 365 * 24 * 60 * 60
    print(f'Hello there, {name}') # Hello there, Austin
    print(f'There are {seconds_per_year} seconds in a year!') # There are 31536000 seconds in a year!
    print(f'There are {seconds_per_year:e} seconds in a year!') # There are 3.153600e+07 seconds in a year!
    print(f'There are {seconds_per_year:.2f} seconds in a year!') # There are 31536000.00 seconds in a year!
def removed_unicode_function():
    '''The unicode() builtin is gone in Python 3, but the u"..." literal
    prefix survives and simply produces a plain str object.'''
    #u = unicode('hello') # NameError: name 'unicode' is not defined
    text = u'hello'
    print(type(text)) # <class 'str'>
if __name__ == '__main__':
    #string_interpolation()
    # Bug fix: the module defines removed_unicode_function(); the old call
    # to removed_unicode_literal() raised NameError at runtime.
    removed_unicode_function()
"uif93194@gmail.com"
] | uif93194@gmail.com |
2143bfe2f27376e3c4a543e6e500a5ec29c3acb6 | 8d9318a33afc2c3b5ca8ac99fce0d8544478c94a | /Books/Casandra DB/opscenter-5.1.0/lib/py/orbited/config.py | b99e2552ba9aa9286c6cd7e573a7899845635690 | [] | no_license | tushar239/git-large-repo | e30aa7b1894454bf00546312a3fb595f6dad0ed6 | 9ee51112596e5fc3a7ab2ea97a86ec6adc677162 | refs/heads/master | 2021-01-12T13:48:43.280111 | 2016-11-01T22:14:51 | 2016-11-01T22:14:51 | 69,609,373 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:947c21ea69deeac668131461faf03067f40ca5465a810ded94f3be7de6850a32
size 4070
| [
"tushar239@gmail.com"
] | tushar239@gmail.com |
3a91929e8281c2c581601d35fbb5e10507183f52 | 8ca52d458dda5b1a557828003240942ed02e19d9 | /41_3.py | 1d4d5a23ff3fe1550cb40c82dda822f55cea4fa5 | [
"MIT"
] | permissive | rursvd/pynumerical2 | 48c8a7707c4327bfb88d0b747344cc1d71b80b69 | 4b2d33125b64a39099ac8eddef885e0ea11b237d | refs/heads/master | 2020-04-19T04:15:34.457065 | 2019-12-06T04:12:16 | 2019-12-06T04:12:16 | 167,957,944 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,204 | py | from numpy import zeros
# Define the 4th-order Adams-Bashforth-Moulton (ABM4) integrator
def abm4(f,t0,tf,y0,n):
    """Integrate y' = f(t, y) from t0 to tf in n uniform steps.

    Returns (t, y): arrays of the n+1 node times and solution values.
    The first three steps are produced by classical RK4; after that a
    4th-order Adams-Bashforth predictor / Adams-Moulton corrector pair
    advances the solution.  (Arithmetic is kept identical to the
    original, only names and comments differ.)
    """
    h = (tf - t0)/n
    t = zeros(n+1)
    y = zeros(n+1)
    t[0] = t0
    y[0] = y0
    # Bootstrap: classical Runge-Kutta 4 supplies the first three values.
    for i in range(3):
        s1 = h * f(t[i], y[i])
        s2 = h * f(t[i] + h/2.0, y[i] + s1/2.0)
        s3 = h * f(t[i] + h/2.0, y[i] + s2/2.0)
        s4 = h * f(t[i] + h, y[i] + s3)
        y[i+1] = y[i] + 1.0 / 6.0 * (s1 + 2.0 * s2 + 2.0 * s3 + s4)
        t[i+1] = t[i] + h
    # Adams-Bashforth predictor followed by the Adams-Moulton corrector.
    for i in range(3,n):
        g0 = f(t[i],y[i])
        g1 = f(t[i-1],y[i-1])
        g2 = f(t[i-2],y[i-2])
        g3 = f(t[i-3],y[i-3])
        predicted = y[i] + h / 24.0 * (55.0 * g0 - 59.0 * g1 + 37.0 * g2 - 9.0 * g3)
        t[i+1] = t[i] + h
        c0 = f(t[i+1],predicted)
        c1 = f(t[i],y[i])
        c2 = f(t[i-1],y[i-1])
        c3 = f(t[i-2],y[i-2])
        y[i+1] = y[i] + h / 24.0 * (9.0 * c0 + 19.0 * c1 - 5.0 * c2 + c3)
    return t,y
# Define functions
def f(t,y):
    """Right-hand side of the test ODE y' = t - y."""
    return t - y
# Set initial conditions: integrate over [0, 1] with y(0) = 1 in 5 steps.
t0 = 0.0
tf = 1.0
y0 = 1.0
n = 5
# Execute ABM4
t, yabm4 = abm4(f,t0,tf,y0,n)
# Print a two-column (t, y) table with fixed-width formatting.
print("%5s %8s" % ('t','y'))
for i in range(n+1):
    print("%9.4f %9.4f" % (t[i],yabm4[i]))
| [
"noreply@github.com"
] | rursvd.noreply@github.com |
6fc167d645d3db0a6cfb071b13ca2ce7a0b52b3b | f4ad721b7158ff2605be6f7e4bde4af6e0e11364 | /vt_manager_kvm/src/python/vt_manager_kvm/communication/sfa/rspecs/elements/versions/nitosv1Lease.py | ee4f5159a0476fbf555ab0111dc5b0f761e90bf1 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | ict-felix/stack | 3fb4222a0538c0dbbe351ccc3da1bafa9ca37057 | 583ccacf067b9ae6fc1387e53eaf066b4f3c0ade | refs/heads/master | 2021-01-10T10:16:29.851916 | 2016-06-22T15:11:11 | 2016-06-22T15:11:11 | 51,439,714 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,832 | py | from vt_manager_kvm.communication.sfa.util.sfalogging import logger
from vt_manager_kvm.communication.sfa.util.xml import XpathFilter
from vt_manager_kvm.communication.sfa.util.xrn import Xrn
from vt_manager_kvm.communication.sfa.rspecs.elements.element import Element
from vt_manager_kvm.communication.sfa.rspecs.elements.node import Node
from vt_manager_kvm.communication.sfa.rspecs.elements.sliver import Sliver
from vt_manager_kvm.communication.sfa.rspecs.elements.location import Location
from vt_manager_kvm.communication.sfa.rspecs.elements.hardware_type import HardwareType
from vt_manager_kvm.communication.sfa.rspecs.elements.disk_image import DiskImage
from vt_manager_kvm.communication.sfa.rspecs.elements.interface import Interface
from vt_manager_kvm.communication.sfa.rspecs.elements.bwlimit import BWlimit
from vt_manager_kvm.communication.sfa.rspecs.elements.pltag import PLTag
from vt_manager_kvm.communication.sfa.rspecs.elements.versions.nitosv1Sliver import NITOSv1Sliver
from vt_manager_kvm.communication.sfa.rspecs.elements.versions.nitosv1PLTag import NITOSv1PLTag
from vt_manager_kvm.communication.sfa.rspecs.elements.versions.pgv2Services import PGv2Services
from vt_manager_kvm.communication.sfa.rspecs.elements.lease import Lease
from vt_manager_kvm.communication.sfa.rspecs.elements.channel import Channel
class NITOSv1Lease:
    """RSpec (NITOS v1) helpers to serialize leases/channels into an XML
    tree and parse them back out.

    NOTE(review): add_leases consumes the caller's ``leases`` list
    in place (items are removed while grouping) - confirm callers do
    not reuse the list afterwards.
    """
    @staticmethod
    def add_leases(xml, leases, channels):
        # Reuse an existing <network> element, create one named after the
        # first lease's authority, or fall back to the document root.
        network_elems = xml.xpath('//network')
        if len(network_elems) > 0:
            network_elem = network_elems[0]
        elif len(leases) > 0:
            network_urn = Xrn(leases[0]['component_id']).get_authority_urn().split(':')[0]
            network_elem = xml.add_element('network', name = network_urn)
        else:
            network_elem = xml
        # group the leases by slice and timeslots
        # (destructively: each pass collects all leases matching the first
        # remaining lease's (slice_id, start_time, duration) and removes
        # them from the input list until it is empty).
        grouped_leases = []
        while leases:
            slice_id = leases[0]['slice_id']
            start_time = leases[0]['start_time']
            duration = leases[0]['duration']
            group = []
            for lease in leases:
                if slice_id == lease['slice_id'] and start_time == lease['start_time'] and duration == lease['duration']:
                    group.append(lease)
            grouped_leases.append(group)
            for lease1 in group:
                leases.remove(lease1)
        lease_elems = []
        for lease in grouped_leases:
            #lease_fields = ['lease_id', 'component_id', 'slice_id', 'start_time', 'duration']
            lease_fields = ['slice_id', 'start_time', 'duration']
            # One <lease> element per group; the group's first member
            # carries the shared attributes.
            lease_elem = network_elem.add_instance('lease', lease[0], lease_fields)
            lease_elems.append(lease_elem)
            # add nodes of this lease
            for node in lease:
                lease_elem.add_instance('node', node, ['component_id'])
            # add reserved channels of this lease
            #channels = [{'channel_id': 1}, {'channel_id': 2}]
            # Channels are matched to the group by the same
            # (slice_id, start_time, duration) triple.
            for channel in channels:
                if channel['slice_id'] == lease[0]['slice_id'] and channel['start_time'] == lease[0]['start_time'] and channel['duration'] == lease[0]['duration']:
                    lease_elem.add_instance('channel', channel, ['channel_num'])
    @staticmethod
    def get_leases(xml, filter={}):
        # Match both un-namespaced and default-namespaced <lease> elements.
        # NOTE(review): `filter={}` is a mutable default, but it is never
        # mutated here, so it is harmless; it also shadows the builtin.
        xpath = '//lease%s | //default:lease%s' % (XpathFilter.xpath(filter), XpathFilter.xpath(filter))
        lease_elems = xml.xpath(xpath)
        return NITOSv1Lease.get_lease_objs(lease_elems)
    @staticmethod
    def get_lease_objs(lease_elems):
        # Flatten each <lease> into one Lease object per child <node> and
        # one Channel object per child <channel>, copying the lease's
        # timing attributes onto every object.
        leases = []
        channels = []
        for lease_elem in lease_elems:
            #get nodes
            node_elems = lease_elem.xpath('./default:node | ./node')
            for node_elem in node_elems:
                lease = Lease(lease_elem.attrib, lease_elem)
                lease['slice_id'] = lease_elem.attrib['slice_id']
                lease['start_time'] = lease_elem.attrib['start_time']
                lease['duration'] = lease_elem.attrib['duration']
                lease['component_id'] = node_elem.attrib['component_id']
                leases.append(lease)
            #get channels
            channel_elems = lease_elem.xpath('./default:channel | ./channel')
            for channel_elem in channel_elems:
                channel = Channel(channel_elem.attrib, channel_elem)
                channel['slice_id'] = lease_elem.attrib['slice_id']
                channel['start_time'] = lease_elem.attrib['start_time']
                channel['duration'] = lease_elem.attrib['duration']
                channel['channel_num'] = channel_elem.attrib['channel_num']
                channels.append(channel)
        return (leases, channels)
| [
"jenkins@localhost"
] | jenkins@localhost |
12e0d9daf80ccf156316332ec0d4444bd9b5627c | cee4e129e20626fd400edf251a61bb567d34287b | /app/modules/core/decorators.py | 9405be786944c186be3ad650b82862d1b12b7484 | [
"Apache-2.0"
] | permissive | nileshprasad137/silverback | ec255183d067d557aeaa30e8456325e7f99420b9 | 2e6e68dce8251a3275b987c895e6ce987c45776d | refs/heads/master | 2020-12-01T17:32:42.895795 | 2019-12-20T17:59:38 | 2019-12-20T17:59:38 | 230,712,144 | 1 | 0 | Apache-2.0 | 2019-12-29T06:26:27 | 2019-12-29T06:26:26 | null | UTF-8 | Python | false | false | 6,493 | py | # Copyright 2019 Silverbackhq
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Third Party Library
from django.shortcuts import redirect, reverse
from django.http import JsonResponse
from django.utils.translation import gettext as _
from django.http import Http404
# Local Library
from app.modules.core.acl import ACL
from app.modules.util.helpers import Helpers
from app.modules.core.response import Response
from app.modules.entity.option_entity import OptionEntity
def allow_if_authenticated_and_has_permission(permission):
    """Decorator factory for API controller methods: the request proceeds
    only when the user is authenticated and holds ``permission``;
    otherwise a JSON private-failure payload is returned."""
    def decorator(function):
        def guarded(controller, request, *args, **kwargs):
            acl = ACL()
            user = request.user
            if user and user.is_authenticated and acl.user_has_permission(user.id, permission):
                return function(controller, request, *args, **kwargs)
            return JsonResponse(Response().send_private_failure([{
                "type": "error",
                "message": _("Oops! Access forbidden.")
            }]))
        return guarded
    return decorator
def redirect_if_not_has_permission(permission):
    """Decorator factory for web controllers: anonymous users and users
    lacking ``permission`` get a 404, hiding the page's existence."""
    def decorator(function):
        def guarded(controller, request, *args, **kwargs):
            acl = ACL()
            user = request.user
            if user and user.is_authenticated and acl.user_has_permission(user.id, permission):
                return function(controller, request, *args, **kwargs)
            raise Http404("Page not found.")
        return guarded
    return decorator
def redirect_if_authenticated(function):
    """Keep authenticated users off guest-only pages: they are redirected
    to the ``redirect`` query-string target when present, otherwise to
    the admin dashboard; everyone else reaches the wrapped view."""
    def guarded(controller, request, *args, **kwargs):
        user = request.user
        if not (user and user.is_authenticated):
            return function(controller, request, *args, **kwargs)
        target = request.GET["redirect"] if "redirect" in request.GET else "app.web.admin.dashboard"
        return redirect(target)
    return guarded
def login_if_not_authenticated_or_no_permission(permission):
    """Require a logged-in user and, when ``permission`` is non-empty,
    that permission too.  Anonymous visitors are sent to the login page
    with a redirect back to the current URL; logged-in users without the
    permission get a 404."""
    def decorator(function):
        def guarded(controller, request, *args, **kwargs):
            acl = ACL()
            user = request.user
            if not (user and user.is_authenticated):
                login_url = reverse("app.web.login") + "?redirect=" + request.get_full_path()
                return redirect(login_url)
            if permission and not acl.user_has_permission(user.id, permission):
                raise Http404("Page not found.")
            return function(controller, request, *args, **kwargs)
        return guarded
    return decorator
def login_if_not_authenticated(function):
    """Send anonymous visitors to the login page, carrying the current
    URL in a ``redirect`` query parameter so they come back after login."""
    def guarded(controller, request, *args, **kwargs):
        user = request.user
        if user and user.is_authenticated:
            return function(controller, request, *args, **kwargs)
        login_url = reverse("app.web.login") + "?redirect=" + request.get_full_path()
        return redirect(login_url)
    return guarded
def stop_request_if_authenticated(function):
def wrap(controller, request, *args, **kwargs):
if request.user and request.user.is_authenticated:
response = Response()
return JsonResponse(response.send_private_failure([{
"type": "error",
"message": _("Error! Access forbidden for authenticated users.")
}]))
return function(controller, request, *args, **kwargs)
return wrap
def allow_if_authenticated(function):
def wrap(controller, request, *args, **kwargs):
if not request.user or not request.user.is_authenticated:
response = Response()
return JsonResponse(response.send_private_failure([{
"type": "error",
"message": _("Oops! Access forbidden.")
}]))
return function(controller, request, *args, **kwargs)
return wrap
def redirect_if_not_installed(function):
def wrap(controller, request, *args, **kwargs):
installed = False if OptionEntity().get_one_by_key("app_installed") is False else True
if not installed:
return redirect("app.web.install")
return function(controller, request, *args, **kwargs)
return wrap
def protect_metric_with_auth_key(function):
def wrap(controller, request, *args, **kwargs):
if kwargs["type"] == "prometheus":
prometheus_token = OptionEntity().get_one_by_key("prometheus_token")
if prometheus_token.value != "" and ("HTTP_AUTHORIZATION" not in request.META or prometheus_token.value != request.META["HTTP_AUTHORIZATION"]):
raise Http404("Host not found.")
return function(controller, request, *args, **kwargs)
return wrap
def stop_request_if_installed(function):
def wrap(controller, request, *args, **kwargs):
installed = False if OptionEntity().get_one_by_key("app_installed") is False else True
if installed:
response = Response()
return JsonResponse(response.send_private_failure([{
"type": "error",
"message": _("Error! Application is already installed.")
}]))
return function(controller, request, *args, **kwargs)
return wrap
def log_request_data(function):
def wrap(controller, request, *args, **kwargs):
correlation_id = request.META["X-Correlation-ID"] if "X-Correlation-ID" in request.META else ""
helper = Helpers()
logger = helper.get_logger(__name__)
logger.debug(_("Request Method: %(method)s {'correlationId':'%(correlationId)s'}") % {
"method": request.method,
"correlationId": correlation_id
})
logger.debug(_("Request URL: %(path)s {'correlationId':'%(correlationId)s'}") % {
"path": request.path,
"correlationId": correlation_id
})
logger.debug(_("Request Body: %(body)s {'correlationId':'%(correlationId)s'}") % {
"body": request.body,
"correlationId": correlation_id
})
return function(controller, request, *args, **kwargs)
return wrap
| [
"hello@clivern.com"
] | hello@clivern.com |
ec970d2cf4891d8130aaa4f7a35fea5f3de01572 | 99c4d4a6592fded0e8e59652484ab226ac0bd38c | /code/batch-2/dn13 - objektni minobot/M-17061-2321.py | 4a16430f80833c6df431f37d846cbf5b195eed6c | [] | no_license | benquick123/code-profiling | 23e9aa5aecb91753e2f1fecdc3f6d62049a990d5 | 0d496d649247776d121683d10019ec2a7cba574c | refs/heads/master | 2021-10-08T02:53:50.107036 | 2018-12-06T22:56:38 | 2018-12-06T22:56:38 | 126,011,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,420 | py | class Minobot():
def __init__(self):
self.x=0
self.y=0
#smeri 0=desno, 1=dol, 2=levo, 3=gor
self.smer=0
self.zgodovina=[(0,0,0)]
def naprej(self, d):
if self.smer==0: self.x+=d
elif self.smer==1: self.y-=d
elif self.smer==2: self.x-=d
elif self.smer==3: self.y+=d
self.zgodovina.append((self.x, self.y, self.smer))
def desno(self):
self.smer+=1
if self.smer==4: self.smer=0
self.zgodovina.append((self.x, self.y, self.smer))
def levo(self):
self.smer -= 1
if self.smer==-1: self.smer=3
self.zgodovina.append((self.x, self.y, self.smer))
def koordinate(self):
return (self.x, self.y)
def razdalja(self):
return abs(self.x)+abs(self.y)
def razveljavi(self):
if len(self.zgodovina)>1:
self.x, self.y, self.smer = self.zgodovina[-2]
del self.zgodovina[-1]
import unittest
class TestObvezna(unittest.TestCase):
def test_minobot(self):
a = Minobot()
b = Minobot()
self.assertEqual(a.koordinate(), (0, 0))
self.assertEqual(b.koordinate(), (0, 0))
self.assertEqual(a.razdalja(), 0)
self.assertEqual(b.razdalja(), 0)
a.naprej(1)
self.assertEqual(a.koordinate(), (1, 0))
self.assertEqual(b.koordinate(), (0, 0))
self.assertEqual(a.razdalja(), 1)
self.assertEqual(b.razdalja(), 0)
a.naprej(2)
self.assertEqual(a.koordinate(), (3, 0))
self.assertEqual(b.koordinate(), (0, 0))
self.assertEqual(a.razdalja(), 3)
self.assertEqual(b.razdalja(), 0)
b.naprej(2)
self.assertEqual(a.koordinate(), (3, 0))
self.assertEqual(b.koordinate(), (2, 0))
self.assertEqual(a.razdalja(), 3)
self.assertEqual(b.razdalja(), 2)
a.desno() # zdaj je obrnjen dol
a.naprej(4)
self.assertEqual(a.koordinate(), (3, -4))
self.assertEqual(b.koordinate(), (2, 0))
self.assertEqual(a.razdalja(), 7)
self.assertEqual(b.razdalja(), 2)
a.desno() # obrnjen je levo
a.naprej(1)
self.assertEqual(a.koordinate(), (2, -4))
self.assertEqual(b.koordinate(), (2, 0))
self.assertEqual(a.razdalja(), 6)
self.assertEqual(b.razdalja(), 2)
a.desno() # obrnjen je gor
a.naprej(1)
self.assertEqual(a.koordinate(), (2, -3))
self.assertEqual(b.koordinate(), (2, 0))
self.assertEqual(a.razdalja(), 5)
self.assertEqual(b.razdalja(), 2)
a.desno() # obrnjen desno
a.naprej(3)
self.assertEqual(a.koordinate(), (5, -3))
self.assertEqual(b.koordinate(), (2, 0))
self.assertEqual(a.razdalja(), 8)
self.assertEqual(b.razdalja(), 2)
b.levo() # obrnjen gor
b.naprej(3)
self.assertEqual(b.koordinate(), (2, 3))
self.assertEqual(b.razdalja(), 5)
b.levo() # obrnjen levo
b.naprej(3)
self.assertEqual(b.koordinate(), (-1, 3))
self.assertEqual(b.razdalja(), 4)
a.naprej(5)
self.assertEqual(a.koordinate(), (10, -3))
self.assertEqual(a.razdalja(), 13)
class TestDodatna(unittest.TestCase):
def test_undo(self):
a = Minobot()
a.desno() # gleda dol
a.naprej(4)
a.levo() # gleda desno
a.naprej(1)
a.naprej(2)
self.assertEqual(a.koordinate(), (3, -4))
a.razveljavi()
self.assertEqual(a.koordinate(), (1, -4))
a.naprej(1)
self.assertEqual(a.koordinate(), (2, -4))
a.razveljavi()
self.assertEqual(a.koordinate(), (1, -4))
a.razveljavi()
self.assertEqual(a.koordinate(), (0, -4))
a.naprej(1)
self.assertEqual(a.koordinate(), (1, -4))
a.razveljavi()
self.assertEqual(a.koordinate(), (0, -4))
a.razveljavi() # spet gleda dol
self.assertEqual(a.koordinate(), (0, -4))
a.naprej(2)
self.assertEqual(a.koordinate(), (0, -6))
a.razveljavi()
self.assertEqual(a.koordinate(), (0, -4))
a.razveljavi()
self.assertEqual(a.koordinate(), (0, 0))
a.naprej(3)
self.assertEqual(a.koordinate(), (0, -3))
a.razveljavi()
self.assertEqual(a.koordinate(), (0, 0))
a.razveljavi() # spet gleda desno
self.assertEqual(a.koordinate(), (0, 0))
a.naprej(3)
self.assertEqual(a.koordinate(), (3, 0))
a.razveljavi()
self.assertEqual(a.koordinate(), (0, 0))
a.razveljavi() # se ne usuje
self.assertEqual(a.koordinate(), (0, 0))
a.naprej(2)
self.assertEqual(a.koordinate(), (2, 0))
a.razveljavi()
self.assertEqual(a.koordinate(), (0, 0))
a.razveljavi() # se ne usuje
self.assertEqual(a.koordinate(), (0, 0))
a.razveljavi() # se ne usuje
self.assertEqual(a.koordinate(), (0, 0))
a.razveljavi() # se ne usuje
self.assertEqual(a.koordinate(), (0, 0))
a.razveljavi() # se ne usuje
self.assertEqual(a.koordinate(), (0, 0))
if __name__ == "__main__":
unittest.main()
| [
"benjamin.fele@gmail.com"
] | benjamin.fele@gmail.com |
fa05b5b661e3bea96d1294b31c5e7660ef47f866 | dfc827bf144be6edf735a8b59b000d8216e4bb00 | /CODE/Sympy/Forcings/DryStepandGaussian/withPW.py | 42fdd929c10f4463048033cea289f5817865fb7a | [] | no_license | jordanpitt3141/ALL | c5f55e2642d4c18b63b4226ddf7c8ca492c8163c | 3f35c9d8e422e9088fe096a267efda2031ba0123 | refs/heads/master | 2020-07-12T16:26:59.684440 | 2019-05-08T04:12:26 | 2019-05-08T04:12:26 | 94,275,573 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 937 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 14 10:38:52 2017
@author: jordan
"""
from sympy import *
from sympy.plotting import plot
def hxt(x,t,x0,h0,c,l):
nh = Piecewise(
(0,x <= (x0 + c*t) - l),
(0,x >= (x0 + c*t) + l),
(h0,True))
return nh
def uxt(x,t,x0,c,a0,a1,a2):
nu1 = a0*exp(-((x - c*t) - a1)**2/(2*a2))
return nu1
def Gxt(h,u):
ux = diff(u,x)
return u*h - diff(h**3/3*ux,x)
def hFluxx(h,u):
#First Part
F1 = diff(h*u,x)
#Second Part
return F1
def GFluxx(h,u,G,g):
#First Part
ux = diff(u,x)
F1 = diff(G*u + g/2.0*h*h - 2*h*h*h*ux*ux/3,x)
return F1
g =9.81
l = 10.0
x0 =1.0
h0 = 1.0
g =9.81
c = 2.0
a0 = h0
a1 = x0
a2 = l/4
x = Symbol('x')
t = Symbol('t')
h = hxt(x,t,x0,h0,c,l)
u = uxt(x,t,x0,c,a0,a1,a2)
G = Gxt(h,u)
ht = diff(h,t)
#G
Gt = diff(G,t)
hF = hFluxx(h,u)
GF= GFluxx(h,u,G,g)
| [
"jordanpitt3141@github.com"
] | jordanpitt3141@github.com |
5c051d64d82a12f43cd6487b00d35f56fd5e79ac | 6baedc3ef128b0c1118e637834cdfd2d512cd9b3 | /tastypie_user/tests/basic_urls.py | f7c80129bc5a28a72b44b8db2264eadff24e2ff1 | [] | no_license | Tsangares/tastypie-user | 7cca6ec0b0cb1532f89def5bd9f36316b34f5357 | 002ec995c2d72d9954580275312fadc48d1a1d24 | refs/heads/master | 2020-12-30T19:57:45.262578 | 2016-07-27T00:20:28 | 2016-07-27T00:20:28 | 64,263,242 | 0 | 0 | null | 2016-07-27T00:11:17 | 2016-07-27T00:11:16 | null | UTF-8 | Python | false | false | 281 | py | #coding:utf8
from django.conf.urls import patterns, include, url
from tastypie.api import Api
from tastypie_user.resources import UserResource
v1_api = Api(api_name='v1')
v1_api.register(UserResource())
urlpatterns = patterns(
'',
url(r'^api/', include(v1_api.urls)),
)
| [
"hepochen@gmail.com"
] | hepochen@gmail.com |
3e595e01f0470ac0c1a29c602b75c6312cbce930 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2613/60770/272911.py | 4e0e833966bf85b0b5337f9325d08963dc7bdfe4 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 557 | py | def solve():
n=int(input())
def solveOne():
des=int(input())
c1,c2,cur=0,0,0
res=[]
while c1<des:
cur+=1
res.append(cur)
c1+=1
for i in range(c2):
cur+=2
res.append(cur)
c1+=1
if c1==des:
return res
c2+=1
return res
for i in range(n):
res=solveOne()
res=list(map(str,res))
print(' '.join(res))
if __name__ == '__main__' :
solve() | [
"1069583789@qq.com"
] | 1069583789@qq.com |
761c648992df279c078fddcb04854ee83de7ecfc | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_267/ch19_2020_03_04_12_29_27_998230.py | 3cff36a41b429d89a268e91cc2e4757cc4684d65 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | py |
def lados(x, y, z):
if a == b and a == c:
print("equilátero")
elif a == b or a == c or b ==c:
print("isósceles")
else:
print("escaleno") | [
"you@example.com"
] | you@example.com |
471b12ff69c7042464a74c387b98a1821688430a | e71ebc10db20b42964dbc5162c17c13c3dc23e32 | /examples/Atari2600/DQN-gym-Music.py | 5d9300b1d8e223b7be5a28b8c114a46c719ae466 | [
"Apache-2.0"
] | permissive | pkumusic/tensorpack | 18ea1a1e9960196f7930c9b540084e543c00030b | 2b535bcdb919cbf299c81e236055e3d52d054d56 | refs/heads/master | 2021-01-10T23:01:37.136541 | 2016-11-08T21:42:44 | 2016-11-08T21:42:44 | 72,312,328 | 1 | 0 | null | 2016-10-29T22:18:57 | 2016-10-29T22:18:56 | null | UTF-8 | Python | false | false | 6,554 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# DL Project for 10807
import argparse
import os
from tensorpack.predict.common import PredictConfig
from tensorpack import *
from tensorpack.models.model_desc import ModelDesc, InputVar
from tensorpack.train.config import TrainConfig
from tensorpack.tfutils.common import *
from tensorpack.callbacks.group import Callbacks
from tensorpack.callbacks.stat import StatPrinter
from tensorpack.callbacks.common import ModelSaver
from tensorpack.callbacks.param import ScheduledHyperParamSetter, HumanHyperParamSetter
from tensorpack.tfutils.summary import add_moving_summary, add_param_summary
from tensorpack.tfutils.symbolic_functions import huber_loss
from tensorpack.RL.expreplay import ExpReplay
from tensorpack.tfutils.sessinit import SaverRestore
from tensorpack.train.trainer import QueueInputTrainer
from tensorpack.RL.common import MapPlayerState
from tensorpack.RL.gymenv import GymEnv
from tensorpack.RL.common import LimitLengthPlayer, PreventStuckPlayer
from tensorpack.RL.history import HistoryFramePlayer
import common
from tensorpack.tfutils.argscope import argscope
from tensorpack.models.conv2d import Conv2D
from tensorpack.models.pool import MaxPooling
from tensorpack.models.fc import FullyConnected
from tensorpack.models.nonlin import LeakyReLU
STEP_PER_EPOCH = 6000
IMAGE_SIZE = (84, 84)
FRAME_HISTORY = 4
IMAGE_SHAPE3 = IMAGE_SIZE + (FRAME_HISTORY, ) # one state input
NUM_ACTIONS = None
GAMMA = 0.99
def get_player(viz=False, train=False, dumpdir=None):
pl = GymEnv(ENV_NAME, dumpdir=dumpdir)
def func(img):
return cv2.resize(img, IMAGE_SIZE[::-1]) #TODO: Do we really need to resize here? Check the original paper.
pl = MapPlayerState(pl, func)
global NUM_ACTIONS
NUM_ACTIONS = pl.get_action_space().num_actions()
if not train: # When testing
pl = HistoryFramePlayer(pl, FRAME_HISTORY)
#pl = PreventStuckPlayer(pl, 30, 1) #TODO: Need to know the start button. Is it different for each game?
pl = LimitLengthPlayer(pl, 30000) # 500s
return pl
common.get_player = get_player()
class Model(ModelDesc):
def _get_input_vars(self):
if NUM_ACTIONS is None:
p = get_player(); del p
return [InputVar(tf.float32, (None,) + IMAGE_SHAPE3, 'state'),
InputVar(tf.int64, (None,), 'action'),
InputVar(tf.float32, (None,), 'reward'),
InputVar(tf.float32, (None,) + IMAGE_SHAPE3, 'next_state'),
InputVar(tf.bool, (None,), 'isOver')]
def _get_DQN_prediction(self, image):
#TODO: Do we need to add other pre-processing? e.g., subtract mean
image = image / 255.0
#TODO: The network structure can be improved?
with argscope(Conv2D, nl=tf.nn.relu, use_bias=True): # Activation for each layer
l = Conv2D('conv0', image, out_channel=32, kernel_shape=5)
l = MaxPooling('pool0', l, 2)
l = Conv2D('conv1', l, out_channel=32, kernel_shape=5)
l = MaxPooling('pool1', l, 2)
l = Conv2D('conv2', l, out_channel=64, kernel_shape=4)
l = MaxPooling('pool2', l, 2)
l = Conv2D('conv2', l, out_channel=64, kernel_shape=3)
# the original arch
# .Conv2D('conv0', image, out_channel=32, kernel_shape=8, stride=4)
# .Conv2D('conv1', out_channel=64, kernel_shape=4, stride=2)
# .Conv2D('conv2', out_channel=64, kernel_shape=3)
l = FullyConnected('fc0', l, 512, nl=lambda x, name:LeakyReLU.f(x, 0.01, name))
l = FullyConnected('fct', l, NUM_ACTIONS, nl=tf.identity())
def _build_graph(self, inputs):
state, action, reward, next_state, isOver = inputs
predict_value = self._get_DQN_prediction() # N * NUM_ACTIONS #TODO: If we need self. here
action_onehot = tf.one_hot(action, NUM_ACTIONS, 1.0, 0.0) # N * NUM_ACTION
pred_action_value = tf.reduce_sum(predict_value * action_onehot, 1) # N,
### This is for tracking the learning process.
# The mean max-Q across samples. Should be increasing over training
max_pred_reward = tf.reduce_mean(tf.reduce_max(predict_value, 1),
name='predict_reward')
add_moving_summary(max_pred_reward)
with tf.variable_scope('target'): #TODO: Check the usage of variable scope in this context
targetQ_predict_value = self._get_DQN_prediction(next_state)
# DQN
best_v = tf.reduce_max(targetQ_predict_value, 1)
#TODO: Double-DQN
#TODO: Why we need stop_gradient here
target = reward + (1.0 - tf.cast(isOver, tf.float32)) * GAMMA * tf.stop_gradient(best_v)
cost = huber_loss(target - pred_action_value)
add_param_summary([('conv.*/W', ['histogram', 'rms']),
('fc.*/W', ['histogram', 'rms'])]) #TODO
self.cost = tf.reduce_mean(cost, name='cost')
def get_config():
logger.auto_set_dir()
M = Model()
lr = tf.Variable(0.001, trainable=False, name='learning_rate')
tf.scalar_summary('learning_rate', lr)
dataset_train = ExpReplay()
return TrainConfig(
dataset=dataset_train, # A dataflow object for training
optimizer=tf.train.AdamOptimizer(lr, epsilon=1e-3),
callbacks=Callbacks([StatPrinter(), ModelSaver(),
ScheduledHyperParamSetter('learning_rate',[(80, 0.0003), (120, 0.0001)]) # No interpolation
# TODO: Some other parameters
]),
session_config = get_default_sess_config(0.6), # Tensorflow default session config consume too much resources.
model = M,
step_per_epoch=STEP_PER_EPOCH,
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-g','--gpu', help='comma seperated list of GPU(s) to use.')
parser.add_argument('-l','--load', help='load model')
parser.add_argument('-e','--env', help='env', required=True)
parser.add_argument('-t','--task', help='task to perform',
choices=['play','eval','train'], default='train')
args=parser.parse_args()
ENV_NAME = args.env
# set NUM_ACTIONS
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
if args.task != 'train':
assert args.load is not None
if args.task == 'train':
config = get_config()
if args.load:
config.session_init = SaverRestore(args.load)
QueueInputTrainer(config).train()
else:
pass
| [
"635716260@qq.com"
] | 635716260@qq.com |
cbbc96886a8a3601f76b8cb5f74fb650fedebc88 | 187ce9d4df99208c3db0e44f3db6a3df4ce34717 | /Python/714.best-time-to-buy-and-sell-stock-with-transaction-fee.py | 668435baf3d4387e487a35af26095a21f332f620 | [] | no_license | zhoujf620/LeetCode-Practice | 5098359fbebe07ffa0d13fe40236cecf80c12507 | 8babc83cefc6722b9845f61ef5d15edc99648cb6 | refs/heads/master | 2022-09-26T21:49:59.248276 | 2022-09-21T14:33:03 | 2022-09-21T14:33:03 | 222,195,315 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,023 | py | #
# @lc app=leetcode id=714 lang=python
#
# [714] Best Time to Buy and Sell Stock with Transaction Fee
#
# https://leetcode.com/problems/best-time-to-buy-and-sell-stock-with-transaction-fee/description/
#
# algorithms
# Medium (52.17%)
# Likes: 1124
# Dislikes: 33
# Total Accepted: 51K
# Total Submissions: 97.5K
# Testcase Example: '[1,3,2,8,4,9]\n2'
#
# Your are given an array of integers prices, for which the i-th element is the
# price of a given stock on day i; and a non-negative integer fee representing
# a transaction fee.
# You may complete as many transactions as you like, but you need to pay the
# transaction fee for each transaction. You may not buy more than 1 share of a
# stock at a time (ie. you must sell the stock share before you buy again.)
# Return the maximum profit you can make.
#
# Example 1:
#
# Input: prices = [1, 3, 2, 8, 4, 9], fee = 2
# Output: 8
# Explanation: The maximum profit can be achieved by:
# Buying at prices[0] = 1Selling at prices[3] = 8Buying at prices[4] = 4Selling
# at prices[5] = 9The total profit is ((8 - 1) - 2) + ((9 - 4) - 2) = 8.
#
#
#
# Note:
# 0 < prices.length .
# 0 < prices[i] < 50000.
# 0 .
#
#
# @lc code=start
class Solution(object):
def maxProfit(self, prices, fee):
"""
:type prices: List[int]
:type fee: int
:rtype: int
"""
if len(prices) < 2:
return 0
n = len(prices)
buy = [0 for _ in range(n)]
sell = [0 for _ in range(n)]
buy[0] = -prices[0]
for i in range(1, n):
buy[i] = max(buy[i-1], sell[i-1]-prices[i])
sell[i] = max(sell[i-1], buy[i-1] + prices[i]-fee)
return sell[-1]
n = len(prices)
pre_buy = -prices[0]
pre_sell = 0
for i in range(1, n):
buy = max(buy, pre_sell-prices[i])
sell = max(pre_sell, pre_buy + prices[i]-fee)
pre_buy = buy
pre_sell = sell
return sell
# @lc code=end
| [
"zhoujf620@zju.edu.cn"
] | zhoujf620@zju.edu.cn |
0415a072db46f9e506342f2ce972e9e9568e140b | ca5288ed60ad052b56e3e0048c3f9ed284250ffc | /google/colab/patches/__init__.py | a8cc48e82811e0ca904d00f749c7da0739657362 | [
"Apache-2.0"
] | permissive | googlecolab/colabtools | 06035e1340717f714febbdbd82565def075b3876 | f73520dc61e9fdddb62ac0ab1c897d81d893113d | refs/heads/main | 2023-09-06T07:15:54.745725 | 2023-08-30T22:37:51 | 2023-08-30T22:38:16 | 110,724,756 | 1,962 | 796 | Apache-2.0 | 2023-09-14T16:10:13 | 2017-11-14T17:49:51 | Python | UTF-8 | Python | false | false | 727 | py | """Colab-specific patches for functions."""
__all__ = ['cv2_imshow', 'cv_imshow']
import cv2
from IPython import display
import PIL
def cv2_imshow(a):
"""A replacement for cv2.imshow() for use in Jupyter notebooks.
Args:
a: np.ndarray. shape (N, M) or (N, M, 1) is an NxM grayscale image. For
example, a shape of (N, M, 3) is an NxM BGR color image, and a shape of
(N, M, 4) is an NxM BGRA color image.
"""
a = a.clip(0, 255).astype('uint8')
# cv2 stores colors as BGR; convert to RGB
if a.ndim == 3:
if a.shape[2] == 4:
a = cv2.cvtColor(a, cv2.COLOR_BGRA2RGBA)
else:
a = cv2.cvtColor(a, cv2.COLOR_BGR2RGB)
display.display(PIL.Image.fromarray(a))
cv_imshow = cv2_imshow
| [
"colaboratory-team@google.com"
] | colaboratory-team@google.com |
aa1ff3c7e533c928c807bd6b0e4bae0736525a75 | 94dbd40525692416ea8100d13b03ece73ee33f7f | /kgtk/utils/convert_embeddings_format.py | 093f6c52f6198f9bf4a7cc5cc1284474b8e56aeb | [
"MIT"
] | permissive | usc-isi-i2/kgtk | 7101129ce1dde646095803429d3c751bf87ae071 | c31ba4c33d5f925fdb66a487ba2e1184c9ca4254 | refs/heads/main | 2023-08-22T06:58:22.301262 | 2023-06-29T19:55:28 | 2023-06-29T19:55:28 | 234,676,361 | 325 | 53 | MIT | 2023-06-29T19:55:30 | 2020-01-18T03:34:48 | Jupyter Notebook | UTF-8 | Python | false | false | 7,987 | py | import sys
from pathlib import Path
from kgtk.io.kgtkreader import KgtkReader, KgtkReaderOptions, KgtkReaderMode
from kgtk.exceptions import KGTKException
from typing import Tuple, Optional
from kgtk.io.kgtkwriter import KgtkWriter
WORD2VEC = 'word2vec'
GPROJECTOR = 'gprojector'
class ConvertEmbeddingsFormat(object):
def __init__(self,
input_file: Path,
output_file: Path,
node_file: Path = None,
output_format: Optional[str] = WORD2VEC,
input_property: Optional[str] = 'embeddings',
metadata_columns: Optional[str] = None,
output_metadata_file: Path = None,
error_file: str = sys.stderr,
line_separator: str = ",",
**kwargs
):
if output_format not in (WORD2VEC, GPROJECTOR):
raise KGTKException(f'--output-format should be one of "{WORD2VEC}" or "{GPROJECTOR}')
self.input_file = input_file
self.output_file = output_file
self.node_file = node_file
self.output_format = output_format.lower()
self.input_property = input_property
self.line_separator = line_separator
if output_metadata_file is None:
output_metadata_file = f"{str(Path.home())}/kgtk_embeddings_gprojector_metadata.tsv"
self.output_metadata_file = output_metadata_file
self.metadata_columns = metadata_columns.split(",") if metadata_columns is not None else []
self.error_file = error_file
self.reader_options: KgtkReaderOptions = KgtkReaderOptions.from_dict(kwargs)
def process(self):
kw_metadata = None
node_metadata = {}
row_count = 0
if self.output_format == GPROJECTOR:
node_metadata = self.read_node_file()
metadata_column_names = node_metadata['column_names'] if node_metadata else self.metadata_columns
kw_metadata = KgtkWriter.open(file_path=self.output_metadata_file,
error_file=self.error_file,
column_names=metadata_column_names,
mode=KgtkWriter.Mode.NONE,
require_all_columns=False,
no_header=False)
kw: KgtkWriter = KgtkWriter.open(file_path=self.output_file,
error_file=self.error_file,
column_names=[],
mode=KgtkWriter.Mode.NONE,
require_all_columns=False,
no_header=True)
kr: KgtkReader = KgtkReader.open(self.input_file,
error_file=self.error_file,
mode=KgtkReaderMode.EDGE,
options=self.reader_options
)
if self.node_file is None and self.output_format == GPROJECTOR and len(self.metadata_columns) > 0:
columns_not_found = []
for col in self.metadata_columns:
if col not in kr.column_name_map:
columns_not_found.append(col)
if len(columns_not_found) > 0:
kr.close()
kw.close()
if kw_metadata:
kw_metadata.close()
raise KGTKException(
f"The following metadata columns are not found in the input file: {','.join(columns_not_found)}")
if self.output_format == WORD2VEC:
# first line is number of vectors and vector dimension
line_count, dimension_count = self.count_lines(kr)
kw.writeline(f"{line_count} {dimension_count}")
kr.close()
kr: KgtkReader = KgtkReader.open(self.input_file,
error_file=self.error_file,
mode=KgtkReaderMode.EDGE,
options=self.reader_options
)
for row in kr:
node1 = row[kr.node1_column_idx]
node2 = row[kr.node2_column_idx]
if row[kr.label_column_idx] == self.input_property:
if self.output_format == WORD2VEC:
kw.writeline(f"{node1} {self.line_to_w2v(node2)}")
elif self.output_format == GPROJECTOR:
if row_count == 10000: # google projector will only handle upto 10000 rows
break
row_count += 1
node2_gp = self.line_to_gprojector(node2)
node1_metadata = ""
if node2_gp != "":
kw.writeline(node2_gp)
if node_metadata:
if node1 in node_metadata:
node1_metadata = "\t".join(node_metadata[node1].values())
elif len(self.metadata_columns) > 0:
values = [row[kr.column_name_map[x]] for x in self.metadata_columns]
node1_metadata = "\t".join(values)
kw_metadata.writeline(node1_metadata)
kw.close()
kr.close()
if kw_metadata:
kw_metadata.close()
def count_lines(self, kr: KgtkReader) -> Tuple[int, int]:
line_count = 0
dimension_count = 0
for row in kr:
node2 = row[kr.node2_column_idx]
if row[kr.label_column_idx] == self.input_property:
if dimension_count == 0:
dimension_count = len(node2.split(self.line_separator))
line_count += 1
if line_count == 0 and dimension_count == 0:
raise KGTKException(f"Zero rows in the input file with property: {self.input_property}")
return line_count, dimension_count
def line_to_w2v(self, line: str) -> str:
if line is None or line.strip() == "":
return ""
return " ".join(line.strip().split(self.line_separator))
def line_to_gprojector(self, line: str) -> str:
if line is None or line.strip() == "":
return ""
return "\t".join(line.strip().split(self.line_separator))
def read_node_file(self) -> dict:
node_metadata = {}
if self.node_file is None:
return node_metadata
kr_node: KgtkReader = KgtkReader.open(self.node_file,
error_file=self.error_file,
options=self.reader_options,
mode=KgtkReaderMode.NONE,
)
columns_not_found = []
if len(self.metadata_columns) > 0:
for col in self.metadata_columns:
if col not in kr_node.column_names:
columns_not_found.append(col)
node_metadata['column_names'] = self.metadata_columns
else:
node_metadata['column_names'] = kr_node.column_names
if len(columns_not_found) > 0:
kr_node.close()
raise KGTKException(
f"The following metadata columns are not found in the node file: {','.join(columns_not_found)}")
for row in kr_node:
if len(row) == len(kr_node.column_names):
node_id = row[kr_node.id_column_idx]
if node_id not in node_metadata:
node_metadata[node_id] = dict()
for col in node_metadata['column_names']:
node_metadata[node_id][col] = row[kr_node.column_name_map[col]]
kr_node.close()
return node_metadata
| [
"amandeep.s.saggu@gmail.com"
] | amandeep.s.saggu@gmail.com |
48d8978f351f9865201ec1dfa3f8eca0b6c63dc3 | 67f2c5c121842429acbb569d09548d5eecc0674a | /assembler/term.py | 5b2c37f145bcfe3bd0e1544e17fc52c91f40509c | [
"MIT"
] | permissive | paulscottrobson/next-hla-v1 | 29a56753a9e0d7e7d90e2f686d3d4dbb24550bf6 | 735e7a3e2f1208e6b8c7c2bb125ff2733e7fa93a | refs/heads/master | 2020-04-11T16:57:30.117092 | 2018-12-16T17:23:23 | 2018-12-16T17:23:23 | 161,942,931 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,879 | py | # ***************************************************************************************
# ***************************************************************************************
#
# Name : term.py
# Author : Paul Robson (paul@robsons.org.uk)
# Date : 16th December 2018
# Purpose : Extract a term from a stream
#
# ***************************************************************************************
# ***************************************************************************************
from errors import *
from streams import *
from elements import *
from dictionary import *
from democodegen import *
import os,re
# ***************************************************************************************
# Extract a term. Returns [isAddress,address]
# ***************************************************************************************
class TermExtractor(object):
def __init__(self,parser,codeGenerator,dictionary):
self.parser = parser
self.codeGenerator = codeGenerator
self.dictionary = dictionary
#
# Extract a tern
#
def extract(self):
element = self.parser.get()
#print("["+element+"]")
#
# Nothing
#
if element == "":
raise AssemblerException("Missing term")
#
# - <Constant term>
#
if element == "-":
term = self.extract()
if term[0]:
print("Can only apply unary minus to constants")
term[1] = (-term[1]) & 0xFFFF
return term
#
# Constant integer. Also hex and 'character', the parser handles this.
#
if element[0] >= '0' and element[0] <= '9':
return [False,int(element,10)]
#
# Identifier.
#
if element[0] >= 'a' and element[0] <= 'z':
dEntry = self.dictionary.find(element)
if dEntry is None:
raise AssemblerException("Unknown identifier "+element)
if isinstance(dEntry,ConstantIdentifier):
return [False,dEntry.getValue()]
if isinstance(dEntry,VariableIdentifier):
return [True,dEntry.getValue()]
raise AssemblerException("Cannot use "+element+" in expression.")
#
# Identifier address.
#
if element[0] == '@':
element = self.parser.get()
dEntry = self.dictionary.find(element)
if dEntry is None:
raise AssemblerException("Unknown identifier "+element)
if isinstance(dEntry,VariableIdentifier):
return [False, dEntry.getValue() & 0xFFFF]
raise AssemblerException("Cannot use @ operator on "+element)
#
# String
#
if element[0] == '"':
strAddr = self.codeGenerator.stringConstant(element[1:-1])
return [False,strAddr]
#
# Give up !
#
raise AssemblerException("Bad term "+element)
if __name__ == "__main__":
tas = TextArrayStream("""
$7FFE 65321 'x' -4 38
locvar glbvar const1 -const1
"hello world"
// String
// @identifier (var only)
""".split("\n"))
tx = TermExtractor(ElementParser(tas),DemoCodeGenerator(),TestDictionary())
while True:
print(tx.extract())
| [
"paul@robsons.org.uk"
] | paul@robsons.org.uk |
bc5983aa09d3c821627a922acd286c8c4c280628 | 8c046cb97abb3626c53c4e276dfc1779feeaf6ce | /setup.py | 270b1c4fba3e8ff567a9aa08f1252a8ec885f040 | [
"MIT"
] | permissive | zwlyn/pyfy | 7871e4cd9d39e8f0ad23a449b498175b2337b3e7 | 909a08458b9567fa6686019fd5630ce8e5003231 | refs/heads/main | 2023-04-10T10:28:28.334173 | 2020-11-05T09:30:26 | 2020-11-05T09:30:26 | 310,176,791 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,398 | py | #!/usr/bin/env python
#-*- coding:utf-8 -*-
from setuptools import setup, find_packages
setup(
name = "py-fy",
version = "2.0.4",
keywords = ("pip", "py-fy","translate", "fy"),
description = "An Word-translation software stranslate between simple-Chinese and English",
long_description = "An Word-translation software translate between simple-Chinese and English",
license = "MIT Licence",
url = "https://github.com/zwlyn/pyfy",
author = "zwlyn",
author_email = "1666013677@qq.com",
packages = find_packages(),
include_package_data = True,
platforms = "any",
install_requires = ["pymouse", "pyinstaller", "requests"],
python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*",
classifiers=[
'Intended Audience :: Developers',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
]
)
| [
"1666013677@qq.com"
] | 1666013677@qq.com |
707aaacb576e895b003381a9a67c488cb0c39148 | 7aecab27c231c5207f26a1682543b0d6c5093c06 | /server/dancedeets/nlp/styles/poi_staff.py | 2c14933c1d53e223439bd3139616e9075a5b1819 | [] | no_license | mikelambert/dancedeets-monorepo | 685ed9a0258ea2f9439ae4ed47ebf68bb5f89256 | 4eff1034b9afd3417d168750ea3acfaecd20adc6 | refs/heads/master | 2022-08-10T07:16:32.427913 | 2018-04-15T22:05:58 | 2018-04-15T22:05:58 | 75,126,334 | 24 | 2 | null | 2022-07-29T22:28:45 | 2016-11-29T22:04:44 | Python | UTF-8 | Python | false | false | 3,497 | py | # -*-*- encoding: utf-8 -*-*-
from dancedeets.nlp import base_auto_classifier
from dancedeets.nlp import dance_keywords
from dancedeets.nlp import grammar
from dancedeets.nlp import style_base
Any = grammar.Any
Name = grammar.Name
connected = grammar.connected
commutative_connected = grammar.commutative_connected
POI = Any(
u'poi', # english
u'ポイ', # japanese
u'포이', # korean
)
FIRE = Any(
u'apoy', # tagalog
u'ateş', # turkish
#u'brand', # danish
u'brann', # norwegian
u'foc', # romanian
u'feu', # french
u'fuego', # spanish
u'fuoco', # italian
u'fogo', # portuguese
u'eld', # swedish
u'feuer', # german
u'fire dance', # english
u'palo', # finnish
u'požáru', # czech
u'ognia', # polish
#u'tarian api', # malay
u'tűz', # hungarian
u'ugnies', # lithuanian
u'vatreni', # croatian
u'vuur', # dutch
u'bốc lửa', # vietnamese
u'φωτιά', # greek
u'оган', # macedonian
u'огненный', # russian
u'אש', # hebrew
u'النار', # arabic
u'ไฟ', # thai
u'火災', # japanese
u'火', # chinese simplified
u'불', # korean
)
STAFF = Any(
'staff',
u'スタッフ',
)
GOOD_DANCE = Any(
POI,
commutative_connected(Any(FIRE, STAFF), dance_keywords.EASY_DANCE),
'contact staff',
)
FIRE_KEYWORDS = [
u'apoy sayaw', # tagalog
u'ateş dansı', # turkish
u'branddans', # danish
u'branndans', # norwegian
u'dans de foc', # romanian
u'danse du feu', # french
u'danza del fuego', # spanish
u'danza del fuoco', # italian
u'dança de fogo', # portuguese
u'elddans', # swedish
u'feuertanz', # german
u'fire dance', # english
u'palo tanssia', # finnish
u'tanec požáru', # czech
u'taniec ognia', # polish
u'tarian api', # malay
u'tűz tánc', # hungarian
u'ugnies šokis', # lithuanian
u'vatreni ples', # croatian
u'vuur dans', # dutch
u'điệu nhảy bốc lửa', # vietnamese
u'χορός φωτιά', # greek
u'оган танц', # macedonian
u'огненный танец', # russian
u'ריקוד אש', # hebrew
u'رقصة النار', # arabic
u'เต้นรำไฟ', # thai
u'火災のダンス', # japanese
u'火舞', # chinese simplified
u'불춤', # korean
]
POI_KEYWORDS = [
u'poi', # english
u'полизоя', # russian
u'פוי', # hebrew
u'البوي', # arabic
u'ポイ', # japanese
u'포이', # korean
]
class Classifier(base_auto_classifier.DanceStyleEventClassifier):
    """Keyword-grammar classifier for poi / staff / fire-dance events.

    All classification machinery lives in the project base class; this
    subclass only supplies the grammar configuration for this style.
    """

    GOOD_DANCE = GOOD_DANCE  # module-level grammar of poi/staff/fire-dance terms
    ADDITIONAL_EVENT_TYPE = Any()  # no extra event-type keywords for this style

    def _quick_is_dance_event(self):
        # NOTE(review): presumably a fast pre-filter hook on the base class;
        # always returning True sends every event through the full keyword
        # classification -- confirm against DanceStyleEventClassifier.
        return True
class Style(style_base.Style):
    """Style registration for poi / staff / fire spinning ("POI_STAFF")."""

    @classmethod
    def get_name(cls):
        """Unique identifier for this style."""
        return 'POI_STAFF'

    @classmethod
    def get_rare_search_keywords(cls):
        """Low-volume search terms (mostly non-English translations)."""
        return FIRE_KEYWORDS + POI_KEYWORDS

    @classmethod
    def get_popular_search_keywords(cls):
        """High-volume English search terms for this style."""
        return [
            u'poi',
            u'poi dance',
            u'fire dance',
            u'contact staff',
            u'staff dance',
        ]

    @classmethod
    def get_search_keyword_event_types(cls):
        # No event-type keywords are associated with this style.
        return []

    @classmethod
    def get_preprocess_removal(cls):
        # In Italian "poi" is the everyday word for "then", so strip it from
        # Italian text before classifying to avoid false positives.
        return {
            'it': grammar.Any('poi')  # "poi" = "then" in Italian
        }

    @classmethod
    def _get_classifier(cls):
        """Classifier class used to auto-classify events for this style."""
        return Classifier
| [
"mlambert@gmail.com"
] | mlambert@gmail.com |
d9dfeddf3c766162eaa6ebf6ca27e9baf3b9d3ba | 50e9d86f50966ea2626c45c6a823340a2f0a7247 | /website/stocks/const.py | 695441a03a75673756c054eae428d5671b634c8e | [] | no_license | wgcgxp/zjsxzy_in_js | 82d5dd4e650fddfcf30806b45c0583a658614518 | 219195b38016279b145b9105e10f4785023d9879 | refs/heads/master | 2023-02-12T06:49:12.683902 | 2021-01-13T10:28:43 | 2021-01-13T10:28:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,277 | py | # DATA_DIR = '\\\\hfm-pubshare\HFM各部门共享\Harvest嘉实全体员工\策略孵化组\多重风管分组审核\zhangyang\股票池'
DATA_DIR = '\\\\hg-pubshare\HFM各部门共享\资产配置投资研究中心\张炀\股票池'
STOCKS_LIST_FILE = '%s/股票池.xlsx'%(DATA_DIR)
INSIDE_OUTSIDE_STOCKS_FILE = '%s/细分行业龙头公司.xlsx'%(DATA_DIR)
FUND_STOCKS_FILE = '%s/基金重仓股.xlsx'%(DATA_DIR)
CONNECT_STOCKS_FILE = '%s/北向资金重仓股.xlsx'%(DATA_DIR)
SHEET_NAMES = ['金融_小组', '大宗商品_小组', '可选消费_小组',
'基础设施_小组', '互联网_小组', '器械商业生物药-主_小组',
'化药中药服务-主_小组', '医药3_小组', '房地产_小组',
'软件_小组', '硬件_小组', '初级消费品_小组',
'高端装备_小组', '汽车_小组', '新材料_小组',
'新建造_小组', '新能源_小组', '食品饮料-主_小组']
STOCKS_CODE_FILE = '%s/股票代码名单.xlsx'%(DATA_DIR)
HKSTOCKS_CODE_FILE = '%s/港股股票代码名单.xlsx'%(DATA_DIR)
INDUSTRY_FILE = '%s/申万行业分类表.xlsx'%(DATA_DIR)
TOP_STOCKS_FILE = '%s/行业龙头.xlsx'%(DATA_DIR)
TOP_STOCKS_LIST_FILE = '%s/行业龙头股票池.xlsx'%(DATA_DIR) | [
"zjsxzy@gmail.com"
] | zjsxzy@gmail.com |
271e5130c602f5f24d932c201066641f173cb104 | a4d6e0c8fd2f10360f41a378256d887988234a37 | /fjord/journal/models.py | c060d0d1d3a0efeacaef91e1caa398567b9166e0 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | aokoye/fjord | 190467c0a5fe7c353dfd333b0fad5eba9f611ae9 | e1b62b583c8fdccc07bda61c3fe976a9db959b99 | refs/heads/master | 2021-01-15T11:49:35.882367 | 2015-03-03T21:53:38 | 2015-03-03T21:53:38 | 24,608,127 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,579 | py | from datetime import datetime, timedelta
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.db import models
from fjord.base.models import JSONObjectField
RECORD_INFO = u'info'
RECORD_ERROR = u'error'
class RecordManager(models.Manager):
    """Manager for :class:`Record` with helpers to write and query entries."""

    @classmethod
    def log(cls, type_, app, src, action, msg, instance=None, metadata=None):
        """Create, save and return a journal ``Record``.

        ``msg`` is encoded to UTF-8 bytes before storage (Python 2 era code).
        NOTE(review): declared as a classmethod but never uses ``cls``.
        """
        msg = msg.encode('utf-8')
        metadata = metadata or {}
        rec = Record(
            app=app,
            type=type_,
            src=src,
            action=action,
            msg=msg,
            metadata=metadata
        )
        if instance:
            # Attach the generic foreign key when a related object was given.
            rec.content_object = instance
        rec.save()
        return rec

    def recent(self, app):
        """Queryset of *app*'s records created within the last 7 days."""
        return (self
                .filter(app=app)
                .filter(created__gte=datetime.now() - timedelta(days=7)))

    def records(self, instance):
        """Queryset of every record attached to *instance* via the generic FK."""
        return (
            self
            .filter(object_id=instance.id,
                    content_type=ContentType.objects.get_for_model(instance))
        )
class Record(models.Model):
    """Defines an audit record for something that happened in translations.

    Rows are written through ``Record.objects`` (:class:`RecordManager`) and
    can optionally point at any model instance via a generic foreign key.
    """
    TYPE_CHOICES = [
        (RECORD_INFO, RECORD_INFO),
        (RECORD_ERROR, RECORD_ERROR),
    ]

    # What app does this apply to
    app = models.CharField(max_length=50)

    # What component was running (e.g. "gengo-machine", "dennis", ...)
    src = models.CharField(max_length=50)

    # The type of this message (e.g. "info", "error", ...)
    type = models.CharField(choices=TYPE_CHOICES, max_length=20)

    # What happened to create this entry (e.g. "guess-language",
    # "translate", ...)
    action = models.CharField(max_length=20)

    # The message details in English (e.g. "unknown language",
    # "unsupported language", ...)
    msg = models.CharField(max_length=255)

    # Generic foreign key to the object this record is about if any
    content_type = models.ForeignKey(ContentType, null=True)
    object_id = models.PositiveIntegerField(null=True)
    content_object = generic.GenericForeignKey()

    # When this log entry was created; ``datetime.now`` is passed as a
    # callable, so it is evaluated when each row is created.
    created = models.DateTimeField(default=datetime.now)

    # Any metadata related to this entry in the form of a Python dict which
    # is stored as a JSON object
    metadata = JSONObjectField()

    objects = RecordManager()

    def __unicode__(self):
        # e.g. u'<Record gengo-machine:info:translate unknown language>'
        return u'<Record {key} {msg}>'.format(
            key=u':'.join([self.src, self.type, self.action]),
            msg=self.msg)
| [
"willkg@mozilla.com"
] | willkg@mozilla.com |
148544d6c1ba866870d3b12cc3124c4ed5a50e2c | 81fe5fd8a5736c68c02a96f0463bdfb1b2686d95 | /tests/unit/test_partition.py | ed585759d39fda3c82e78dd63862428b87f7bc50 | [
"Apache-2.0"
] | permissive | jeffbuttars/acos-client | be77985cb4d2eb930d6c39e3c671befdde5831ef | 809ca6cb52c9564465448bf63a9ff4481e8727ec | refs/heads/master | 2020-12-24T23:38:47.172733 | 2014-07-15T22:07:59 | 2014-07-15T22:07:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,093 | py | # Copyright 2014, Doug Wiegley, A10 Networks.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
import acos_client.errors as acos_errors
import v21_mocks as mocks
class TestPartition(unittest.TestCase):
    """Unit tests for the ACOS v2.1 partition client API.

    Each test drives the client against a canned HTTP interaction from
    ``v21_mocks``; no real device is contacted.
    """

    # Test harness bug with delete
    # def test_partition_delete(self):
    #     with mocks.PartitionDelete().client() as c:
    #         c.system.partition.delete('p1')

    # def test_partition_delete_not_found(self):
    #     with mocks.PartitionDeleteNotFound().client() as c:
    #         c.system.partition.delete('p1')

    def test_partition_create(self):
        with mocks.PartitionCreate().client() as c:
            c.system.partition.create('p1')

    def test_partition_create_exists(self):
        # Creating a partition that already exists must raise Exists.
        with mocks.PartitionCreateExists().client() as c:
            with self.assertRaises(acos_errors.Exists):
                c.system.partition.create('p1')

    def test_partition_exists(self):
        self.assertTrue(c.system.partition.exists('p1')) if False else None
        with mocks.PartitionExists().client() as c:
            self.assertTrue(c.system.partition.exists('p1'))

    def test_partition_exists_not_found(self):
        # exists() reports False (no exception) for a missing partition.
        with mocks.PartitionExistsNotFound().client() as c:
            self.assertFalse(c.system.partition.exists('p1'))

    def test_partition_active(self):
        with mocks.PartitionActive().client() as c:
            c.system.partition.active('p1')

    def test_partition_active_not_found(self):
        # Activating a missing partition must raise NotFound.
        with mocks.PartitionActiveNotFound().client() as c:
            with self.assertRaises(acos_errors.NotFound):
                c.system.partition.active('p1')
| [
"doug@parksidesoftware.com"
] | doug@parksidesoftware.com |
03ec305a8c2796464c907a7857d55dc51464f46a | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_1/victorclf/sheep.py | 440f14ebcf4b2d203a48df050266749ae105662f | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 632 | py | #!/usr/bin/env python
ALL_DIGITS = 2**10 - 1  # bitmask with one bit set per decimal digit 0-9


def addDigitsFromNumToMap(n, digitMap):
    """Return *digitMap* with the bit for every decimal digit of *n* set.

    *digitMap* is a bitmask in which bit ``d`` stands for digit ``d``.
    """
    while n > 0:
        # divmod peels off the last digit and keeps the quotient an int on
        # both Python 2 and Python 3 (the original "n /= 10" is float
        # division under Python 3, which breaks the bitwise OR below).
        n, d = divmod(n, 10)
        digitMap = digitMap | (1 << d)
    return digitMap


def isMapComplete(digitMap):
    """True once every decimal digit 0-9 has been seen."""
    return digitMap == ALL_DIGITS


def solve(n):
    """Return the last number Bleatrix names, or "INSOMNIA" when n < 1.

    She counts n, 2n, 3n, ... and falls asleep once she has seen every
    decimal digit at least once (Code Jam 2016 qual A, "Counting Sheep").
    For n < 1 no new digits ever appear, hence "INSOMNIA".
    """
    if n < 1:
        return "INSOMNIA"
    digitMap = 0
    currentN = n
    while True:
        digitMap = addDigitsFromNumToMap(currentN, digitMap)
        if isMapComplete(digitMap):
            return currentN
        currentN += n
def main():
    # Code Jam I/O driver (Python 2 syntax: raw_input/xrange/print statement).
    # First input line is the number of test cases; each following line holds
    # a single value of N.
    testCases = int(raw_input())
    for case in xrange(1, testCases + 1):
        n = int(raw_input())
        print 'Case #%d: %s' % (case, str(solve(n)))

if __name__ == '__main__':
    main()
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
f50aba7a1dba65eef40ef2bbff4d6a9e972b5396 | ba113c57a0fcf6baaac275f9ecf31362e83f0aaa | /linear_algebra/18_matrix_dot_product_diff_dimensions.py | fc5be0664654df84672fe2f48842ab690106bd49 | [] | no_license | Shishir-rmv/oreilly_math_fundamentals_data_science | aabc7273565fae92d542cdc683b963ac0168b565 | 9283cc50655ef40616bb93829be400cff90dfd37 | refs/heads/main | 2023-08-16T09:26:29.718212 | 2021-10-04T15:53:14 | 2021-10-04T15:53:14 | 413,713,138 | 1 | 0 | null | 2021-10-05T07:17:20 | 2021-10-05T07:17:19 | null | UTF-8 | Python | false | false | 169 | py | from numpy import array
v = array([[3, 1],
[2, 0]])
w = array([[1, 2, 0],
[3, 1, 1]])
dot_product = v.dot(w)
print(dot_product)
| [
"thomasnield@live.com"
] | thomasnield@live.com |
6aca161da9ec5847398a2930cd89ee456a28da48 | f37859eb883ffc6589f4ec8ba66dd9985849684e | /backend/icy_water_dev_20174/settings.py | 5d66b608dfe7f5c6b81a7d5488b0512eca6d2bd7 | [] | no_license | crowdbotics-apps/icy-water-dev-20174 | 97bd95faf09df9bd4edc83dd4477087d213d6256 | ee4db18152807acec6e5f4b530aab812f338f7d0 | refs/heads/master | 2023-03-20T10:37:58.843780 | 2021-03-04T21:27:45 | 2021-03-04T21:27:45 | 344,614,029 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,123 | py | """
Django settings for icy_water_dev_20174 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'modules',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
# start fcm_django push notifications
'fcm_django',
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'icy_water_dev_20174.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'web_build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'icy_water_dev_20174.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY and
AWS_STORAGE_BUCKET_NAME and
AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {
"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")
}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
d478184abfebc687a4ec488a0ff22c70b9edde41 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-dds/huaweicloudsdkdds/v3/model/list_sessions_response.py | 75b03ab3c3ca6e3927fdb1a30da3f1fa939e69a6 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 4,226 | py | # coding: utf-8
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListSessionsResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'total_count': 'int',
'sessions': 'list[QuerySessionResponse]'
}
attribute_map = {
'total_count': 'total_count',
'sessions': 'sessions'
}
def __init__(self, total_count=None, sessions=None):
"""ListSessionsResponse
The model defined in huaweicloud sdk
:param total_count: 总记录数。
:type total_count: int
:param sessions: 具体信息。
:type sessions: list[:class:`huaweicloudsdkdds.v3.QuerySessionResponse`]
"""
super(ListSessionsResponse, self).__init__()
self._total_count = None
self._sessions = None
self.discriminator = None
if total_count is not None:
self.total_count = total_count
if sessions is not None:
self.sessions = sessions
@property
def total_count(self):
"""Gets the total_count of this ListSessionsResponse.
总记录数。
:return: The total_count of this ListSessionsResponse.
:rtype: int
"""
return self._total_count
@total_count.setter
def total_count(self, total_count):
"""Sets the total_count of this ListSessionsResponse.
总记录数。
:param total_count: The total_count of this ListSessionsResponse.
:type total_count: int
"""
self._total_count = total_count
@property
def sessions(self):
"""Gets the sessions of this ListSessionsResponse.
具体信息。
:return: The sessions of this ListSessionsResponse.
:rtype: list[:class:`huaweicloudsdkdds.v3.QuerySessionResponse`]
"""
return self._sessions
@sessions.setter
def sessions(self, sessions):
"""Sets the sessions of this ListSessionsResponse.
具体信息。
:param sessions: The sessions of this ListSessionsResponse.
:type sessions: list[:class:`huaweicloudsdkdds.v3.QuerySessionResponse`]
"""
self._sessions = sessions
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListSessionsResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
2d869887dd805283d9ad87531e3cd300eb36e5f3 | 2b4badbedab24ed4376ab65818d0e59af6539144 | /env_workspace1/margin-softmax/function_test.py | ae183b3a9ba724c318d85df2d1c6d63b93fa5a86 | [] | no_license | matthewangbin/Python | 878d8180d12d235f8d238574414bb41edad5ceee | c9a94b4203380a06364da1f7466aafc4b141d951 | refs/heads/master | 2021-10-11T01:45:22.890144 | 2019-01-21T02:44:25 | 2019-01-21T02:44:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 593 | py | # -*- coding: utf-8 -*-
# @Time : 2018/9/14 15:08
# @Author : Matthew
# @Site :
# @File : function_test.py
# @Software: PyCharm
def strQ2B(ustring):
    """Convert full-width (quanjiao) characters in *ustring* to half-width.

    U+3000 (ideographic space) becomes an ASCII space, and the full-width
    ASCII block U+FF01..U+FF5E is shifted down by 0xFEE0 to its ASCII
    counterparts; every other character passes through unchanged.
    """
    # Python 2 spells the codepoint constructor "unichr"; Python 3 renamed it
    # to "chr".  Resolve it once so the function runs on both interpreters
    # (the original used bare unichr and could not run under Python 3).
    try:
        to_char = unichr  # noqa: F821 -- exists on Python 2 only
    except NameError:
        to_char = chr
    converted = []
    for uchar in ustring:
        inside_code = ord(uchar)
        if inside_code == 12288:  # full-width space -> ASCII space
            inside_code = 32
        elif (inside_code >= 65281 and inside_code <= 65374):  # full-width ASCII block
            inside_code -= 65248
        converted.append(to_char(inside_code))
    # Single join instead of the original quadratic "rstring +=" loop.
    return ''.join(converted)
if __name__=="__main__":
    # Smoke test (Python 2 print statement).  NOTE(review): the literal looks
    # like it was meant to be full-width digits; confirm the original file
    # encoding before relying on this output.
    print strQ2B(u'2000')
"wb2847@163.com"
] | wb2847@163.com |
3b3d89ef7e4f286a1355fd5e3e7d9b4dc937dd5a | cc9bb5470bc03d96fd89f360ca040868a773d0f1 | /timehistogram/s501_time_histogram.py | fc913e6b601d7e9619a4c16ce219e69641e36479 | [] | no_license | tomkooij/lio-project | e75f664e2a3fd6928296c1db50b7c342dfa1f2d9 | 3b2f557f974b5ae464815e025209112ef554e7dc | refs/heads/master | 2020-05-21T22:12:55.218309 | 2019-08-08T11:24:31 | 2019-08-08T11:24:31 | 24,632,133 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,587 | py | """
Read data from station 501 and plot t1-t2 histogram
Goal: recreate graphs form D.Pennink 2010
t1-t2 from station 501, FULL YEAR 2010
ph1 > TRIGGER = charged particle
ph1 < TRIGGER = gamma
"""
import datetime
import tables
import sapphire.esd
import scipy.stats
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import leastsq
# time walk correction function
t_walk = lambda x: 8.36 * np.exp(-0.06702*(x-20.)) + 5.22 # fit from walk.py
STATION = 501
STATIONS = [STATION]
START = datetime.datetime(2010,4,1)
END = datetime.datetime(2010,5,1)
FILENAME = 'station_501_april2010.h5'
#
# Pennink, 2010 p32 specifies these cutoff ADC counts
# >200 ADC count = charged particle
# <120 ADC counts = gamma
# These values are consistent with a pulseheight histogram
#
HIGH_PH = 200
LOW_PH = 120
#
# Read event data from the ESD
# store in table `/sSTATION' for example: /s501
#
def create_new_event_file(filename, stations, start, end):
print "creating file: ",filename
data = tables.open_file(filename,'w')
print "reading from the ESD"
for station in stations:
print "Now reading station %d" % station
sapphire.esd.download_data(data, '/s%d' % station, station, START, END)
return data
#
# Open existing coincidence table.
# Only check if "/coincidences" are in table, no other checks
def open_existing_event_file(filename):
data = tables.open_file(FILENAME, 'a')
return data
#
#
# Least squares fit of histogram data to guassian distribution
# Includes y-scale factor, ignores y-offset
#
# Source: http://stackoverflow.com/a/15521359
#
# histogram_y = array of y data
# histogram_x = array of middle of bins
#
#
# least squares fit of gaussian distribution
#
fitfunc = lambda p, x: p[0]*np.exp(-0.5*((x-p[1])/p[2])**2)
errfunc = lambda p, x, y: (y - fitfunc(p, x))
def gauss_fit_histogram(histogram_y, histogram_x):
init = [1.0, 0.5, 0.5]
out = leastsq( errfunc, init, args=(histogram_x, histogram_y))
c = out[0]
print "A exp[-0.5((x-mu)/sigma)^2]"
print "Fit Coefficients:"
print c[0],c[1],abs(c[2])
return c
#
# Create a pylab (sub)plot with histogram and guassfit
#
# Usage:
#
# import matplotlib.pyplot as plt
# grafiek = plt.figure()
# dt_data = [ ... datapoints ...]
# bins = arrange( )
# bins_middle = arrange()
# title = "Data histogram"
# plot_histogram_with_gaussfit(dt_data, bins, bins_middle, grafiek, title)
# plt.show()
def plot_histogram_with_gaussfit(dt_data, bins_edges, bins_middle, grafiek, title):
print "Number of datapoints (events): %d" % dt_data.size
grafiek.hist(dt_data, bins=bins_edges)
#
# Create histogram array
#
ydata = histogram(dt_data, bins=bins_edges)
histogram_y = ydata[0]
histogram_x = bins_middle
c = gauss_fit_histogram(histogram_y, histogram_x)
grafiek.set_title(title)
# dit moet eigenlijk relatief en geen absolute x,y coordinaten in de grafiek zijn
# grafiek.text(-150,100,r'$\mu=100,\ \sigma=15$')
grafiek.plot(histogram_x, fitfunc(c, histogram_x))
if __name__=='__main__':
#data = create_new_event_file(FILENAME, STATIONS, START, END)
#data.close()
if 'data' not in globals():
data = open_existing_event_file(FILENAME)
events = data.root.s501.events
t1 = events.col('t1')
t2 = events.col('t2')
t3 = events.col('t3')
t4 = events.col('t4')
ph = events.col('pulseheights')
ph1 = ph[:,0]
ph2 = ph[:,1]
ph3 = ph[:,2]
ph4 = ph[:,3]
#bins2ns5 = arange(-201.25,202.26,2.5)
bins2ns5 = np.arange(-51.25,51.26,2.5)
bins2ns5_midden = np.arange(-50,50.1,2.5)
#
# TODO: 4 subplots to recreate the figure from Pennink 2010
#
grafiek = plt.figure()
# time walk correction
t1_corr = t1 - t_walk(ph1)
t2_corr = t2 - t_walk(ph2)
#
# Plot histogram for t1-t2 using event selection based on pulseheight
dt = t1 - t2
dt_corr = t1_corr - t2_corr
# gemiddelden van de gaussianfit
OFFSET1 = 0
OFFSET2 = -2.5
# remove -1 and -999
# select events based on pulseheight
dt1_corr = dt_corr.compress((t1 >= 0) & (t2 >= 0) & (ph1 < LOW_PH) & (ph2 > HIGH_PH))
dt1 = dt.compress((t1 >= 0) & (t2 >= 0) & (ph1 < LOW_PH) & (ph2 > HIGH_PH))
print "number of events", dt1.size
n1, bins1, blaat1 = plt.hist(dt1-OFFSET1, bins=bins2ns5, histtype='step')
n2, bins2, blaat2 = plt.hist(dt1_corr-OFFSET2, bins=bins2ns5, histtype='step')
plt.title('s501, april 2010, delta PMT 1 - PMT 2')
plt.legend(['t1-t2','-walk'], loc=1)
plt.show()
| [
"tomkooij@tomkooij.nl"
] | tomkooij@tomkooij.nl |
a153f08aa078f70702e7678341d4a4370cdcf228 | f9a8ee37334771f37edda863db08a7dcccc9522f | /AtCoder/Practice/茶緑埋め/ACL Beginners Contest C.py | 9824c4c7387cc1d74744948c7e983b41a21c0536 | [] | no_license | shimmee/competitive-programming | 25b008ee225858b7b208c3f3ca7681e33f6c0190 | 894f0b7d557d6997789af3fcf91fe65a33619080 | refs/heads/master | 2023-06-07T13:07:17.850769 | 2021-07-05T17:20:47 | 2021-07-05T17:20:47 | 331,076,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,975 | py | # ACL Beginners Contest C - Connect Cities
# URL: https://atcoder.jp/contests/abl/tasks/abl_c
# Date: 2021/02/02
# ---------- Ideas ----------
# 連結成分のグループ数を数えればいい。グループ数-1が答え
# Union-Find使う
# ------------------- Answer --------------------
#code:python
class UnionFind():
    """Disjoint-set (union-find) with path compression and union by size."""

    def __init__(self, n):
        self.n = n
        self.siz = [1] * n   # component size; meaningful only at root nodes
        self.par = [-1] * n  # parent index; -1 marks a root

    def root(self, x):
        """Return the representative of x's component, compressing the path."""
        # First pass: walk up to the root.
        rep = x
        while self.par[rep] != -1:
            rep = self.par[rep]
        # Second pass: point every node on the walked path straight at it.
        while x != rep:
            nxt = self.par[x]
            self.par[x] = rep
            x = nxt
        return rep

    def same(self, x, y):
        """True when x and y belong to the same component."""
        return self.root(x) == self.root(y)

    def unite(self, x, y):
        """Merge the components of x and y; False if already merged."""
        x, y = self.root(x), self.root(y)
        if x == y:
            return False
        # Union by size: hang the smaller tree (y) under the larger (x).
        if self.siz[x] < self.siz[y]:
            x, y = y, x
        self.par[y] = x
        self.siz[x] += self.siz[y]
        return True

    def size(self, x):
        """Size of the component containing x."""
        return self.siz[self.root(x)]

    def group_count(self):
        """Number of components (i.e. number of root nodes)."""
        return sum(1 for p in self.par if p == -1)

    def all_group_members(self):
        """List of components, each given as a list of its member indices."""
        from collections import defaultdict
        buckets = defaultdict(list)
        for member in range(self.n):
            buckets[self.root(member)].append(member)
        return list(buckets.values())
# Read the graph: n cities and m existing roads (1-indexed city ids on stdin).
n, m = map(int, input().split())
uf = UnionFind(n)
for i in range(m):
    a, b = map(int, input().split())
    a -= 1
    b -= 1
    # Skip the edge (a, b) when it would close a cycle: if a and b already
    # share a root they are connected, so the edge adds no information.
    if uf.same(a, b): continue
    # Record the edge (a, b).
    uf.unite(a, b)
# Connecting k components into one needs exactly k - 1 new roads.
print(uf.group_count()-1)
# ------------------ Sample Input -------------------
3 1
1 2
# ----------------- Length of time ------------------
# 4分
# -------------- Editorial / my impression -------------
# 公式解説がない
# Union-Find貼り付けたらいけた!
# けんちょんさん: https://drken1215.hatenablog.com/entry/2020/09/27/080100
# BFSやDFSでも解けるよね。
# ----------------- Category ------------------
#AtCoder
#Union-Find
#ABC-like
#ABC-C
#茶diff
#連結成分
| [
"shinmeikeita@gmail.com"
] | shinmeikeita@gmail.com |
604ee7ac9a4b18c10362a4a1fd27d71fd20045d2 | 9930f08717594022e0f7fde2a96baaa7fcfce784 | /To calculate length of the string.py | 374517037db7272c4100a7b711d85a45f353c391 | [] | no_license | dinesh5555/python_assignments | 72bd2d1cc35a92a01826536eeb4107953d8d73c7 | 33fbcbe1de8f92bd6ffe07fa66640ce1ab84a756 | refs/heads/master | 2022-11-11T18:42:41.621053 | 2020-07-03T09:12:49 | 2020-07-03T09:12:49 | 276,854,185 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | #!/usr/bin/env python
# coding: utf-8
# In[3]:
str = input("Enter a string: ")
count = 0
for s in str:
count = count+1
print("Length of the input string is:", count)
| [
"noreply@github.com"
] | dinesh5555.noreply@github.com |
eb30907eb5f49b5c4a6f3912396aeacd640f746d | 4bd4bacecee33cada173e427b5ecb1d758bafaad | /src/scalarizr/util/iptables.py | 94d6b1f6c781fcd1dc0ea0c68c7b7cc3fd6b21d2 | [] | no_license | kenorb-contrib/scalarizr | 3f2492b20910c42f6ab38749545fdbb79969473f | 3cc8b64d5a1b39c4cf36f5057f1a6a84a9a74c83 | refs/heads/master | 2022-11-26T10:00:58.706301 | 2017-11-02T16:41:34 | 2017-11-02T16:41:34 | 108,550,233 | 0 | 2 | null | 2020-07-24T11:05:36 | 2017-10-27T13:33:46 | Python | UTF-8 | Python | false | false | 7,747 | py | '''
Created on Jul 21, 2010
@author: Dmytro Korsakov
'''
from scalarizr.util import system2, UtilError
from scalarizr import linux
import os
import logging
# Module-level logger.
LOG = logging.getLogger(__name__)

# Protocol names accepted by iptables' "-p" option (see iptables(8)).
P_TCP = "tcp"
P_UDP = "udp"
P_UDPLITE = "udplite"
P_ICMP = "icmp"
P_ESP = "esp"
P_AH = "ah"
P_SCTP = "sctp"
P_ALL = "all"
# Every protocol value this module recognises.
PROTOCOLS = (P_TCP, P_UDP, P_UDPLITE, P_ICMP, P_ESP, P_AH, P_SCTP, P_ALL)

# Path to the chkconfig service-management binary (RHEL/CentOS style hosts).
CHKCONFIG = '/sbin/chkconfig'
class RuleSpec(object):
    """One iptables rule specification, stored as a mapping from option
    flag ('-p', '-s', ...) to its value.

    A value may be a plain string, or a tuple whose second element is falsy
    to mark the match as inverted (rendered with a leading '!').
    """

    specs = None

    def __init__(self, protocol=None, source=None, destination=None,
                 inint=None, outint=None, sport=None, dport=None,
                 jump=None, custom=None):
        self.specs = {
            '-p': protocol,
            '-s': source,
            '-d': destination,
            '-i': inint,
            '-o': outint,
            '-j': jump,
            '--sport': sport,
            '--dport': dport,
            'custom': custom,
        }

    def __str__(self):
        """Render the spec as an iptables argument string (leading space)."""
        pieces = []
        for key in ('-p', '-s', '-d', '-i', '-o', '--sport', '--dport', '-j'):
            value = self.specs[key]
            if value in (None, 'custom'):
                continue
            if is_inverted(value):
                pieces.append(' ! %s %s' % (key, value))
            else:
                pieces.append(' %s %s' % (key, value))
        rule_spec = ''.join(pieces)
        # Free-form extra arguments are appended verbatim.
        if self.specs['custom']:
            rule_spec += self.specs['custom']
        return str(rule_spec)

    def __eq__(self, other):
        """Specs match when every flag is equal, treating an unset protocol
        as 'ALL' and an unset source/destination as '0.0.0.0/0'."""
        wildcards = {'-p': 'ALL', '-s': '0.0.0.0/0', '-d': '0.0.0.0/0'}
        for key in ('-p', '-s', '-d', '-i', '-o', '-j', '--dport', '--sport'):
            mine = self.specs[key]
            theirs = other.specs[key]
            if mine == theirs:
                continue
            joker = wildcards.get(key)
            if joker is not None and (
                    (not mine and theirs == joker) or
                    (not theirs and mine == joker)):
                continue
            return False
        return True


def is_inverted(param):
    """True when *param* encodes an inverted ('!') match: a tuple whose
    second element is falsy, e.g. ('eth0', False)."""
    if type(param) != tuple:
        return False
    return len(param) > 1 and not param[1]
class IpTables(object):
executable = None
def __init__(self, executable=None):
self.executable = executable or "/sbin/iptables"
def append_rule(self, rule_spec, chain='INPUT'):
rule = "%s -A %s%s" % (self.executable, chain, str(rule_spec))
system2(rule, shell=True)
def insert_rule(self, rule_num, rule_spec, chain='INPUT'):
if not rule_num:
rule_num = ''
rule = "%s -I %s %s%s" % (self.executable, chain, str(rule_num), str(rule_spec))
system2(rule, shell=True)
def delete_rule(self, rule_spec, chain='INPUT'):
rule = "%s -D %s%s" % (self.executable, chain, str(rule_spec))
system2(rule, shell=True)
def list_rules(self, chain='INPUT'):
table = system2('%s --line-numbers -nvL %s' % (self.executable, chain), shell=True)[0]
list = table.splitlines()
rules = []
for line in list:
if line.find("destination")==-1 and not line.startswith('Chain') and line.strip():
row = line.split()
row.reverse()
num = row.pop()
pkts = row.pop()
bytes = row.pop()
for option in range(1,len(row)):
if row[option].startswith('!'):
row[option] = (row[option][1:],False)
elif row[option] in ('--','*'):
row[option] = None
rule = RuleSpec()
last = row.pop()
if last not in PROTOCOLS:
rule.specs['-j'] = last
rule.specs['-p'] = row.pop()
else:
rule.specs['-p'] = last
opt = row.pop()
rule.specs['-i'] = row.pop()
rule.specs['-o'] = row.pop()
rule.specs['-s'] = row.pop()
rule.specs['-d'] = row.pop()
if len(row):
for spec in row:
if spec.startswith('dpt'):
rule.specs['--dport'] = spec.split(':')[1]
if spec.startswith('spt'):
rule.specs['--sport'] = spec.split(':')[1]
rules.append((rule, num))
return rules
def flush(self, chain='INPUT'):
rule = '%s -F %s' % (self.executable, chain)
system2(rule, shell=True)
def usable(self):
return os.access(self.executable, os.X_OK)
def enabled(self):
if linux.os.redhat_family:
return self._chkconfig()
else:
return self.usable()
def _chkconfig(self):
'''
returns True if iptables is enabled on any runlevel
redhat-based only
'''
if not os.path.exists(CHKCONFIG):
raise UtilError('chkconfig not found')
out, err, retcode = system2([CHKCONFIG, '--list', 'iptables'])
if err:
raise UtilError(str(err))
if out:
raw = out.split('\n')
for row in raw:
if row:
data = row.split('\t')
if len(data) == 8:
service = data.pop(0).strip()
levels = []
for level in data:
levels.append(True if 'on' in level else False)
if len(levels) == 7 and service=='iptables' and any(levels):
return True
return False
def _is_rule_not_exist(jump, port, protocol):
'''raise exception if current rule exist'''
jump, port, protocol = str(jump).strip(), str(port).strip(), str(protocol).strip()
for rule in IpTables().list_rules():
if port == rule[0].specs['--dport'] and protocol == rule[0].specs['-p'] \
and jump == rule[0].specs['-j']:
raise Exception('Rule `%s` already exist' % rule[0])
def insert_rule_once(jump, port, protocol):
'''add rule in iptables if it not exist'''
_is_rule_not_exist(jump, port, protocol)
ipt = IpTables()
if ipt.usable() and protocol in PROTOCOLS:
rspec = RuleSpec(dport=port, jump=jump, protocol=protocol)
ipt.insert_rule(None, rule_spec=rspec)
LOG.debug('Rule `%s` added to iptables rules', rspec)
else:
raise Exception('protocol `%s` is not known. It must be one of `%s`' %
(protocol, PROTOCOLS) if ipt.usable() else 'IpTables is not usable')
def remove_rule_once(jump, port, protocol):
'''remove rule from iptables'''
try:
_is_rule_not_exist(jump, port, protocol)
raise Exception('Rule for port=`%s`, protocol=`%s`, jump=`%s` '\
'not exist. It can`t be removed.' % (port, protocol, jump))
except:
IpTables().delete_rule(rule_spec = RuleSpec(dport=port, jump=jump, protocol=protocol))
| [
"kenorb@users.noreply.github.com"
] | kenorb@users.noreply.github.com |
6a6c540ce9785c1253f53ab49f99c2b68ac46a3f | 7eb61dcca9f98d077f8c497611257fa2c65ee44f | /2018 Programs/cupfill/cupfill.py | 87c3bf15448bf3af3cd39d4e010e1b40766ae4cb | [] | no_license | jasonyu0100/General-Programs | bf7d87cfae46d79a40e5b6c428e591ddc5ef4d97 | 0b3e71962159a8dadb3bca6db1c3a1ac2baf2634 | refs/heads/master | 2021-06-16T15:55:24.038897 | 2021-02-02T09:57:02 | 2021-02-02T09:57:02 | 139,376,295 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 795 | py | maxA = 7
maxB = 5
goal = 2
possiblePaths = []
minMoves = {}
def action(a,b,goal,actions):
if a < 0 or b < 0:
return False
if (a,b) not in minMoves:
minMoves[(a,b)] = actions
elif len(minMoves[(a,b)]) < len(actions):
return False
else:
minMoves[(a,b)] = actions
if a == goal or b == goal:
possiblePaths.append(actions)
return True
path1 = action(maxA,b,goal,actions+['FA'])
path2 = action(a,maxB,goal,actions+['FB'])
path3 = action(0,b,goal,actions+['EA'])
path4 = action(a,0,goal,actions+['EB'])
path5 = action(a - (maxB - b),maxB,goal,actions+['AB'])
path6 = action(maxA,b - (maxA - a),goal,actions+['BA'])
return any([path1,path2,path3,path4,path5,path6])
print(action(0,0,goal,[]))
minPath = min(possiblePaths,key=lambda x:len(x))
print(minMoves)
print(minPath) | [
"jasonyu0100@gmail.com"
] | jasonyu0100@gmail.com |
3cd9e0dc765be24272981fe2bc0afcf7d5112721 | a9b0e2907079ceda6ee7ad14aa9362e1bbdb03f2 | /tests/unary_encoding_tests.py | 4ed9f94ad65548a5196ae3c0fd819f49e7b20bae | [
"Apache-2.0"
] | permissive | Yufei-Kang/plato | 93e2d1e3e2b50bda291df1ccf6394d3692064ab2 | 16b170698242b1e11677e80229c3439a9e26965a | refs/heads/main | 2023-08-28T18:10:51.604845 | 2021-10-22T13:36:54 | 2021-10-22T13:36:54 | 394,827,803 | 0 | 1 | Apache-2.0 | 2021-08-11T01:41:46 | 2021-08-11T01:41:46 | null | UTF-8 | Python | false | false | 1,782 | py | """
Unit tests for unary encoding, a local differential privacy mechanism that adds
noise to model weights or features before transmitting to the federated learning server.
"""
import unittest
import numpy as np
from plato.utils import unary_encoding
class UnaryEncodingTest(unittest.TestCase):
"""Tests for unary encoding and random response."""
@staticmethod
def unary_epsilon(p, q):
"""Computes epsilon from p and q.
Reference:
Wang, et al. "Optimizing Locally Differentially Private Protocols," ATC USENIX 2017.
"""
return np.log((p * (1 - q)) / ((1 - p) * q))
def test_epsilon_computation(self):
"""Test the correctness of computing p and q from a given epsilon."""
p = 0.75
q = 0.25
computed_epsilon = UnaryEncodingTest.unary_epsilon(p, q)
np.random.seed(1)
arr = np.array([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0])
symmetric = unary_encoding.symmetric_unary_encoding(
arr, computed_epsilon)
np.random.seed(1)
random_response = unary_encoding.produce_random_response(arr, p, q)
self.assertSequenceEqual(symmetric.tolist(), random_response.tolist())
def test_distribution_probability(self):
"""Test the distribution probability of the results."""
p = 0.75
runs = 100000
arr = np.array([1] * runs)
symmetric = unary_encoding.produce_random_response(arr, p)
total_ones = (symmetric == 1).sum()
print(f"Probability of ones = {total_ones / len(symmetric.tolist())}")
self.assertAlmostEqual(total_ones / len(symmetric.tolist()),
p,
delta=0.005)
if __name__ == '__main__':
unittest.main()
| [
"bli@ece.toronto.edu"
] | bli@ece.toronto.edu |
e1f3aca2a56dc6906ab481ae206c68519dc65684 | de55001f9ffc23f0cfecc4d2c929c1e4e59b3819 | /product/migrations/0002_auto_20210225_1426.py | fb1ac7ec90ac18808267fbebd8420f9c0d535e29 | [] | no_license | OmarFateh/ecommerce | 42d986363b378809f85c332117c813d911eb2a50 | 21a5fafb1aee29cee05805d1309822b2f0ea0e92 | refs/heads/master | 2023-03-31T17:36:19.739142 | 2021-04-08T19:25:38 | 2021-04-08T19:25:38 | 355,014,556 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 672 | py | # Generated by Django 2.2 on 2021-02-25 12:26
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('product', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='product',
old_name='price',
new_name='regular_price',
),
migrations.RemoveField(
model_name='product',
name='brand',
),
migrations.RemoveField(
model_name='product',
name='group',
),
migrations.RemoveField(
model_name='product',
name='subcategory',
),
]
| [
"66747309+OmarFateh@users.noreply.github.com"
] | 66747309+OmarFateh@users.noreply.github.com |
af713b29ab653305219c06a1f33047c6b3caa5cc | c380976b7c59dadaccabacf6b541124c967d2b5a | /.history/src/models/predict_model_20191021180146.py | 68920c54dbf98eb72bc4e0cc6e5bcf8bb4be5110 | [
"MIT"
] | permissive | bkraft4257/kaggle_titanic | b83603563b4a3c995b631e8142fe72e1730a0e2e | f29ea1773773109a867278c001dbd21a9f7b21dd | refs/heads/master | 2020-08-17T12:45:28.653402 | 2019-11-15T16:20:04 | 2019-11-15T16:20:04 | 215,667,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,768 | py | import pandas as pd
import numpy as np
from IPython.display import display, Markdown
from sklearn.metrics import log_loss
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn import metrics
def concat_to_create_xy_test(
X_test: pd.DataFrame, y_test: pd.DataFrame, y_pred: np.array
):
"""[summary]
Arguments:
X_test {[dataframe]} -- [description]
y_test {[dataframe]} -- [description]
y_pred {[np.array]} -- [description]
Returns:
[dataframe] -- [description]
"""
Xy_test = X_test.join(y_test).join(y_pred)
Xy_test["is_prediction_correct"] = Xy_test["survived_pred"] == Xy_test["survived"]
return Xy_test
def calc_metrics(Xy_test):
metric = {}
metric["log_loss"] = log_loss(
Xy_test["survived"].values, Xy_test["survived_pred"].values
)
metric["accuracy"] = Xy_test["is_prediction_correct"].mean()
return metric
def calc_logreg_model(X_train, y_train, X_test, y_test):
"""
Arguments:
X_train {[type]} -- [description]
y_train {[type]} -- [description]
Returns:
[type] -- [description]
"""
print("feature list ...")
print(f"{X_train.columns.tolist()}\n")
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
y_pred = pd.Series(
logreg.predict(X_test), index=y_test.index, name="survived_pred"
).to_frame()
scores = cross_val_score(logreg, X_train, y_train, cv=10)
print(f"Cross Validation Accuracy Scores: {scores}")
print("\n\nAccuracy: %0.4f (+/- %0.4f)" % (scores.mean(), scores.std() * 2))
return logreg, y_pred
| [
"bob.kraft@infiniteleap.net"
] | bob.kraft@infiniteleap.net |
04074927b737662e2c43d73486aa4505ea8019a9 | 7afd96cffcab14f5071c6a6a41890fe815e8be79 | /api.py | e27a2a4a37e3def35d4fb0c93745b301e8a5f8ec | [] | no_license | AppointmentGuru/VuexRequests | c9ac3d11269b3df4fe1e8e8909452c77c428a347 | fa8239f4aa5b6b63aeec25afe3f7ceddf0794207 | refs/heads/master | 2021-01-19T09:03:23.281908 | 2018-11-02T09:28:55 | 2018-11-02T09:28:55 | 87,717,426 | 1 | 0 | null | 2018-01-25T16:41:45 | 2017-04-09T15:12:42 | Vue | UTF-8 | Python | false | false | 2,909 | py | import hug, random, time
from faker import Factory
from falcon import HTTP_400, HTTP_500
fake = Factory.create()
STATUS_CODES = {
100: "100 Continue",
101: "101 Switching Protocols",
102: "102 Processing",
200: "200 OK",
201: "201 Created",
202: "202 Accepted",
203: "203 Non-authoritative Information",
204: "204 No Content",
205: "205 Reset Content",
206: "206 Partial Content",
207: "207 Multi-Status",
208: "208 Already Reported",
226: "226 IM Used",
300: "300 Multiple Choices",
301: "301 Moved Permanently",
302: "302 Found",
303: "303 See Other",
304: "304 Not Modified",
305: "305 Use Proxy",
307: "307 Temporary Redirect",
308: "308 Permanent Redirect",
400: "400 Bad Request",
401: "401 Unauthorized",
402: "402 Payment Required",
403: "403 Forbidden",
404: "404 Not Found",
405: "405 Method Not Allowed",
406: "406 Not Acceptable",
407: "407 Proxy Authentication Required",
408: "408 Request Timeout",
409: "409 Conflict",
410: "410 Gone",
411: "411 Length Required",
412: "412 Precondition Failed",
413: "413 Payload Too Large",
414: "414 Request-URI Too Long",
415: "415 Unsupported Media Type",
416: "416 Requested Range Not Satisfiable",
417: "417 Expectation Failed",
418: "418 I'm a teapot",
421: "421 Misdirected Request",
422: "422 Unprocessable Entity",
423: "423 Locked",
424: "424 Failed Dependency",
426: "426 Upgrade Required",
428: "428 Precondition Required",
429: "429 Too Many Requests",
431: "431 Request Header Fields Too Large",
444: "444 Connection Closed Without Response",
451: "451 Unavailable For Legal Reasons",
499: "499 Client Closed Request",
500: "500 Internal Server Error",
501: "501 Not Implemented",
502: "502 Bad Gateway",
503: "503 Service Unavailable",
504: "504 Gateway Timeout",
505: "505 HTTP Version Not Supported",
506: "506 Variant Also Negotiates",
507: "507 Insufficient Storage",
508: "508 Loop Detected",
510: "510 Not Extended",
511: "511 Network Authentication Required",
599: "599 Network Connect Timeout Error",
}
@hug.response_middleware()
def process_data(request, response, resource):
response.set_header('Access-Control-Allow-Origin', '*')
@hug.get('/api')
@hug.post('/api')
def api(response, status:hug.types.number=0, wait:hug.types.number=0):
"""Gives a random response after a random amount of time"""
if wait == 0: wait = random.choice(range(0,10))
if status == 0:
status = 200
# 1 in 5 likihood of non-200 response
if random.choice(range(0,5)) == 0:
status = random.choice(STATUS_CODES.keys())
status_result = STATUS_CODES.get(status)
time.sleep(wait)
response.status = status_result
return fake.pydict()
| [
"info@38.co.za"
] | info@38.co.za |
570de7e13a83461ad46693a0887b7c728e711fdb | aa4aa51465d79e0447cbe22281f0402ca95bdaa2 | /python/project/AlexNet_Keras.py | c3ebcf5f131d175a6ba14238ace2282d41b1a8a6 | [] | no_license | zuozuo12/usualProject | 2ca06bb7a1ff6f99343f1997053ba8d5a48e00a7 | 335bcef5d76d6cf0c84dd3209176089b3b07fbba | refs/heads/master | 2020-11-27T17:02:33.252884 | 2019-10-22T06:46:32 | 2019-10-22T06:46:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,825 | py | import tensorflow as tf
from tensorflow.python.keras.layers import Conv2D,MaxPool2D,Dropout,Flatten,Dense
def inference(inputs,num_classe=1000,is_training=True,dropout_keep_prob=0.5):
'''
Inference
inputs: a tensor of images
num_classes: the num of category.
is_training: set ture when it used for training
dropout_keep_prob: the rate of dropout during training
'''
x=inputs;
#conv1
x=Conv2D(96,[11,11],4,activation='relu',name='conv1')(x);
#lrn1
x=tf.nn.local_response_normalization(x,name='lrn1');
#pool1
x=MaxPool2D([3,3],2,name='pool1')(x)
#conv2
x=Conv2D(256,[5,5],activation='relu',padding='same',name='conv2')(x)
#lrn2
x=tf.nn.local_response_normalization(x,name='lrn2');
#maxpool2
x=MaxPool2D([3,3],2,name='pool2')(x)
#conv3
x=Conv2D(384,[3,3],activation='relu',padding='same',name='conv3')(x)
#conv4
x=Conv2D(384,[3,3],activation='relu',padding='same',name='conv4')(x)
#conv5
x=Conv2D(256,[3,3],activation='relu',padding='same',name='conv5')(x)
#pool5
x=MaxPool2D([3,3],2,name='pool5')(x)
#flatten使数据扁平化
x=Flatten(name='flatten')(x)
if is_training:
x=Dropout(dropout_keep_prob,name='dropout5')(x)
#fc6
x=Dense(4096,activation='relu',name='fc6')(x);
if is_training:
x=Dropout(dropout_keep_prob,name='dropout6')(x)
x#fc7
x=Dense(4096,activation='relu',name='fc7')(x)
#fc8
logits=Dense(num_classe,name='logit')(x)
return logits;
def build_cost(logits,labels,weight_decay_rate):
'''
cost
logits: predictions
labels: true labels
weight_decay_rate: weight_decay_rate
'''
with tf.variable_scope('coses'):
'''
tf.variable_scope可以让变量有相同的命名,包括tf.get_variable得到的变量,还有tf.Variable的变量
tf.name_scope可以让变量有相同的命名,只是限于tf.Variable的变量
'''
with tf.variable_scope('xent'):
xent=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits,labels=labels));
with tf.variable_scope('decay'):
costs=[]
for var in tf.trainable_variables():
costs.append(tf.nn.l2_loss(var));
tf.summary.histogram(var.op.name,var);
cost_decay=tf.multiply(weight_decay_rate,tf.add_n(costs));
cost=tf.add(xent,cost_decay);
tf.summary.scalar('cost',cost);
return cost;
def build_train_op(cost,lrn_rate,global_step):
'''
train_op
cost: cost
lrn_rate: learning rate
global_step: global step
'''
with tf.variable_scope('train'):
lrn_rate = tf.constant(lrn_rate, tf.float32)
tf.summary.scalar('learning_rate', lrn_rate) # summary
trainable_variables = tf.trainable_variables()
grads = tf.gradients(cost, trainable_variables)
optimizer = tf.train.AdamOptimizer(lrn_rate)
apply_op = optimizer.apply_gradients(
zip(grads, trainable_variables),
global_step=global_step, name='train_step')
train_op = apply_op
return train_op
if __name__ == '__main__':
images = tf.placeholder(tf.float32, [None, 224, 224, 3])
labels = tf.placeholder(tf.float32, [None, 1000])
logits = inference(inputs=images,
num_classe=1000)
print('inference: good job')
cost = build_cost(logits=logits,
labels=labels,
weight_decay_rate=0.0002)
print('build_cost: good job')
global_step = tf.train.get_or_create_global_step()
train_op = build_train_op(cost=cost,
lrn_rate=0.001,
global_step=global_step)
print('build_train_op: good job')
| [
"llfwyyx@163.com"
] | llfwyyx@163.com |
4a69bccd7b42b5d3695af923a042e0e54a07e4b6 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/response/AlipayCommerceSportsLessonBackstageSyncResponse.py | 9961cd264762855068b37e7b91d97383dde25433 | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 482 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayCommerceSportsLessonBackstageSyncResponse(AlipayResponse):
def __init__(self):
super(AlipayCommerceSportsLessonBackstageSyncResponse, self).__init__()
def parse_response_content(self, response_content):
response = super(AlipayCommerceSportsLessonBackstageSyncResponse, self).parse_response_content(response_content)
| [
"jishupei.jsp@alibaba-inc.com"
] | jishupei.jsp@alibaba-inc.com |
ffb7a3b5a5e182112bde91c23e46c72395497317 | de89f51d5f904ca666a778b06831c7e0bc9457e0 | /payment/urls.py | 833c2bcf8389066c733dbdf76307d35397c836d6 | [] | no_license | Kennedy-Njeri/Brightwriter | 3ee7fe6999413a8ef0abc403dd76f82ad45d5f41 | ecb079aace588a7eb8fbda302965d18372937e2c | refs/heads/master | 2022-12-10T19:03:22.624623 | 2019-07-25T20:52:06 | 2019-07-25T20:52:06 | 184,429,912 | 0 | 0 | null | 2022-12-08T05:03:23 | 2019-05-01T14:38:14 | Python | UTF-8 | Python | false | false | 254 | py |
from django.urls import path
from . import views
urlpatterns = [
path('<int:pk>', views.payment_process, name="process"),
path('done/', views.payment_done, name="done"),
path('canceled/', views.payment_canceled, name="canceled"),
] | [
"mistakenz123@gmail.com"
] | mistakenz123@gmail.com |
ba7fa51caac306808c4951ba1818867e6db437ea | 8c51aff248eb6f463d62e934213660437c3a107b | /Lecture_note_Python/day_02/list_10.py | 9019408b03dd0369b32a70921ceea15941ed75db | [] | no_license | wonjun0901/WJ_Develop_Individually | 5f839932c189adf2b2b34f7dadbdeaa8744f8d0e | e0402f5dbdda8ae8292cace124d381e29f707183 | refs/heads/master | 2021-01-02T00:13:38.851832 | 2020-02-18T01:10:15 | 2020-02-18T01:10:15 | 239,406,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 648 | py | # -*- coding: utf-8 -*-
# 리스트 내부에 저장된 요소들을 정렬하는 방법
# - sort 메소드를 사용하여 처리할 수 있음
numbers = [5,1,3,2,9,6,7,8]
print(numbers)
# 리스트 내부의 요소들을 오름차순 정렬하는 예제
# sort 메소드 사용
numbers.sort()
print(numbers)
# sort 메소드의 reverse 매개변수의 값을 True로
# 지정하여 내림차순 정렬을 수행할 수 있음
numbers.sort(reverse=True)
print(numbers)
# 리스트 변수의 reverse 메소드를 사용하여
# 정렬된 이후, 역순으로 값을 배치할 수 있음
numbers.reverse()
print(numbers)
| [
"wonjun0901@gmail.com"
] | wonjun0901@gmail.com |
33432703d01def7cef6f075a85009fdd56882ba0 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-mrsp.0/mrsp_ut=3.5_rd=0.65_rw=0.04_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=78/params.py | 1f9142536f19ad3ffc6125426fe1eb417594e568 | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | {'cpus': 4,
'duration': 30,
'final_util': '3.535357',
'max_util': '3.5',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '0.65',
'res_nmb': '4',
'res_weight': '0.04',
'scheduler': 'RUN',
'trial': 78,
'utils': 'uni-medium-3'}
| [
"ricardo.btxr@gmail.com"
] | ricardo.btxr@gmail.com |
097ef5ecaf03cae65fc13fcf38177a7f292cd4e1 | ec551303265c269bf1855fe1a30fdffe9bc894b6 | /topic4_dynamic_planning_study/0811_waysToChange/interview.py | 6aa0312120f2192bc46403734cf98373cd7d1aba | [] | no_license | GongFuXiong/leetcode | 27dbda7a5ced630ae2ae65e19d418ebbc65ae167 | f831fd9603592ae5bee3679924f962a3ebce381c | refs/heads/master | 2023-06-25T01:05:45.683510 | 2021-07-26T10:05:25 | 2021-07-26T10:05:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,332 | py | ''' 面试题 08.11. 硬币
硬币。给定数量不限的硬币,币值为25分、10分、5分和1分,编写代码计算n分有几种表示法。(结果可能会很大,你需要将结果模上1000000007)
示例1:
输入: n = 5
输出:2
解释: 有两种方式可以凑成总金额:
5=5
5=1+1+1+1+1
示例2:
输入: n = 10
输出:4
解释: 有四种方式可以凑成总金额:
10=10
10=5+5
10=5+1+1+1+1+1
10=1+1+1+1+1+1+1+1+1+1
'''
class Solution:
#def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
'''
方法三 动态规划
'''
def waysToChange(self, n):
'''
动态规划
:param coins: List
:param amount: int
:return:
ans int
'''
mod = 10**9 + 7
dp = [0] * (n+1)
coins = [1,5,10,25]
dp[0] = 1
for coin in coins:
for i in range(coin,n+1):
dp[i] = dp[i] + dp[i-coin]
print(f"dp:{dp}")
return dp[-1] % mod
if __name__ == "__main__":
solution = Solution()
while 1:
str1 = input()
if str1 != "" :
num = int(str1)
res = solution.waysToChange(num)
print(res)
else:
break
| [
"958747457@qq.com"
] | 958747457@qq.com |
6c0a4d084237c14eb86f1545fb7fbb10ebf90a20 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02677/s634954826.py | fe7cfc5cace77a99cc38b1608c9bf586cb445d92 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | import math
a, b, h,m = map(int,input().split())
h_deg = (60 * h + m) * (360 / (60*12))
m_deg = m * (360 / 60)
deg = abs(h_deg-m_deg)
rad = math.radians(deg)
c = a ** 2 + b ** 2 - 2 * a * b * math.cos(rad)
print(math.sqrt(c))
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
b4f4459956be2f25292c8ff73297ee8d2ddc0304 | 8a086d7aa38e270bddd978a7ac29cec7cb52b344 | /test_gcp_8_8/settings.py | f3ec86ee6f55fc01f458a85220d488a9b938e246 | [] | no_license | crowdbotics-dev/test-gcp-8-8 | 70d3b2744fb14cd34b7fc9d1d5d51b6b9e509a77 | 222e3d9599663f26acab85f32db5363dd8d9e839 | refs/heads/master | 2023-08-28T20:11:33.339043 | 2021-10-25T08:44:23 | 2021-10-25T08:44:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,811 | py | """
Django settings for test_gcp_8_8 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import io
import environ
import logging
import google.auth
from google.cloud import secretmanager
from google.auth.exceptions import DefaultCredentialsError
from google.api_core.exceptions import PermissionDenied
from modules.manifest import get_modules
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
env_file = os.path.join(BASE_DIR, ".env")
env = environ.Env()
env.read_env(env_file)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
try:
# Pull secrets from Secret Manager
_, project = google.auth.default()
client = secretmanager.SecretManagerServiceClient()
settings_name = os.environ.get("SETTINGS_NAME", "django_settings")
name = client.secret_version_path(project, settings_name, "latest")
payload = client.access_secret_version(name=name).payload.data.decode("UTF-8")
env.read_env(io.StringIO(payload))
except (DefaultCredentialsError, PermissionDenied):
pass
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
]
MODULES_APPS = get_modules()
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS + MODULES_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'test_gcp_8_8.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'test_gcp_8_8.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY and
AWS_STORAGE_BUCKET_NAME and
AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
# GCP config
GS_BUCKET_NAME = env.str("GS_BUCKET_NAME", "")
if GS_BUCKET_NAME:
DEFAULT_FILE_STORAGE = "storages.backends.gcloud.GoogleCloudStorage"
STATICFILES_STORAGE = "storages.backends.gcloud.GoogleCloudStorage"
GS_DEFAULT_ACL = "publicRead"
| [
"engineering@crowdbotics.com"
] | engineering@crowdbotics.com |
95a2a0b6f5adae2c3d5641de782efc6e002d29d2 | eae5a4f3b5407afd34581e65c1f82a04b7a1855b | /heap/adt.py | 9e3e1f8eb4199041aeefa8bf4c08022849dca4ba | [] | no_license | hjlarry/leetcode | 184331b170ee3d3d1ae8b83a95583e73d900a01c | 54ff328131bf2ef387292f31a0e2a2c2cf612cdd | refs/heads/master | 2021-08-08T02:10:36.022242 | 2020-05-07T07:45:58 | 2020-05-07T07:45:58 | 172,198,562 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,816 | py | class MaxHeap:
    def __init__(self, maxsize=None):
        """Create an empty heap with capacity *maxsize*.

        Note: any falsy maxsize (None or 0) silently becomes 32.
        """
        self.maxsize = maxsize or 32
        self._elements = [None] * self.maxsize  # fixed-size backing store
        self._count = 0  # number of live elements
    def __len__(self):
        """Return the number of elements currently stored."""
        return self._count
    def add(self, item):
        """Insert *item* into the heap.

        Raises:
            Exception: if the heap is already at capacity.
        """
        if self._count >= self.maxsize:
            raise Exception("full heap")
        self._elements[self._count] = item
        self._count += 1
        # Restore the heap property from the newly filled leaf upward.
        self._sift_up(self._count - 1)
    def _sift_up(self, cur):
        # Bubble the element at index *cur* toward the root while it is
        # larger than its parent; recursion stops at the root (cur == 0).
        if cur > 0:
            parent = int((cur - 1) / 2)
            if self._elements[cur] > self._elements[parent]:
                self._elements[cur], self._elements[parent] = (
                    self._elements[parent],
                    self._elements[cur],
                )
                self._sift_up(parent)
    def extract(self):
        """Remove and return the largest element.

        Raises:
            Exception: if the heap is empty.
        """
        if self._count < 1:
            raise Exception("empty heap")
        value = self._elements[0]
        self._count -= 1
        # Move the last leaf to the root; the vacated slot is NOT cleared,
        # so slots at index >= _count retain stale references.
        self._elements[0] = self._elements[self._count]
        self._sift_down(0)
        return value
def _sift_down(self, cur):
left = 2 * cur + 1
right = 2 * cur + 2
largest = cur
if (
left < self._count
and self._elements[left] > self._elements[cur]
and self._elements[left] > self._elements[right]
):
largest = left
elif right < self._count and self._elements[right] > self._elements[cur]:
largest = right
if largest != cur:
self._elements[cur], self._elements[largest] = (
self._elements[largest],
self._elements[cur],
)
self._sift_down(largest)
def test_maxheap():
    """MaxHeap must yield its elements in descending order."""
    size = 5
    heap = MaxHeap(size)
    for value in range(size):
        heap.add(value)
    drained = [heap.extract() for _ in range(size)]
    assert drained == list(range(size - 1, -1, -1))
class MinHeap:
    """Fixed-capacity binary min-heap backed by a plain Python list.

    Mirror image of MaxHeap: identical storage layout and public API
    (add / extract / len), with every ordering comparison inverted so
    that extract() yields the smallest element first.
    """

    def __init__(self, maxsize=None):
        # Any falsy maxsize (None or 0) falls back to a capacity of 32.
        self.maxsize = maxsize or 32
        self._elements = [None] * self.maxsize
        self._count = 0

    def __len__(self):
        return self._count

    def add(self, item):
        """Insert *item*; raises Exception("full heap") at capacity."""
        if self._count >= self.maxsize:
            raise Exception("full heap")
        self._elements[self._count] = item
        self._count += 1
        self._sift_up(self._count - 1)

    def _sift_up(self, cur):
        # Bubble the element at *cur* toward the root while it is smaller
        # than its parent (iterative form of the usual recursion).
        elems = self._elements
        while cur > 0:
            parent = (cur - 1) // 2
            if not (elems[cur] < elems[parent]):
                break
            elems[cur], elems[parent] = elems[parent], elems[cur]
            cur = parent

    def extract(self):
        """Remove and return the smallest element; raises when empty."""
        if self._count < 1:
            raise Exception("empty heap")
        smallest = self._elements[0]
        self._count -= 1
        self._elements[0] = self._elements[self._count]  # last leaf to root
        self._sift_down(0)
        return smallest

    def _sift_down(self, cur):
        # Push the element at *cur* down while a child compares smaller.
        # The left child is chosen only when it is smaller than both the
        # current element and the right-hand slot (same rule as before).
        elems = self._elements
        while True:
            left = 2 * cur + 1
            right = 2 * cur + 2
            chosen = cur
            if (left < self._count
                    and elems[left] < elems[cur]
                    and elems[left] < elems[right]):
                chosen = left
            elif right < self._count and elems[right] < elems[cur]:
                chosen = right
            if chosen == cur:
                return
            elems[cur], elems[chosen] = elems[chosen], elems[cur]
            cur = chosen
def test_minheap():
    """MinHeap must yield its elements in ascending order."""
    size = 5
    heap = MinHeap(size)
    for value in range(size):
        heap.add(value)
    drained = [heap.extract() for _ in range(size)]
    assert drained == list(range(size))
if __name__ == "__main__":
    # Smoke-test both heap variants when run as a script.
    test_maxheap()
    test_minheap()
| [
"hjlarry@163.com"
] | hjlarry@163.com |
6cb68e4c8e07aa0a90c40aae5f4def02d266a930 | 7d8b5220152b4ef4876c489d6648be56bc83c8e7 | /exercises/development/advanced/exercise_1.py | 49045a14a3f0fc47d0b7c605f4cce8cd7487148f | [
"CC-BY-4.0",
"ISC",
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | comp-think/comp-think.github.io | 8f89518e7a463376b431f55fb7f495cb3019d4a5 | e48a7ecf3b1799471271e01430e089e8f8e3c68d | refs/heads/master | 2023-01-04T20:38:27.593237 | 2023-01-02T14:48:54 | 2023-01-02T14:48:54 | 157,171,226 | 52 | 22 | NOASSERTION | 2023-01-02T14:48:55 | 2018-11-12T07:11:23 | Python | UTF-8 | Python | false | false | 2,943 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Silvio Peroni <essepuntato@gmail.com>
#
# Permission to use, copy, modify, and/or distribute this software for any purpose
# with or without fee is hereby granted, provided that the above copyright notice
# and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT,
# OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
# DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
# SOFTWARE.
from math import ceil
from re import sub
# Test case for the function
def test_ari(text, expected):
    """Return True if ari(*text*) equals *expected*, else False.

    Idiom fix: the original computed the result and then spelled the
    comparison out as an if/else returning True/False; the comparison
    already IS that boolean.
    """
    return ari(text) == expected
# Code of the function
def ari(text):
    """Return the Automated Readability Index of *text*, rounded up.

    ARI = 4.71 * (characters / words) + 0.5 * (words / sentences) - 21.43,
    where characters counts letters and digits only, words are
    whitespace-separated tokens, and sentences are counted by full stops.

    Raises:
        ZeroDivisionError: if *text* contains no words or no '.' (unchanged
            from the original behavior).
    """
    # Bug fix: the original class "[^A-z0-9]" used the range A-z, which also
    # keeps the six punctuation characters between 'Z' and 'a'
    # ("[", "\\", "]", "^", "_", "`") and therefore over-counted characters.
    num_char = len(sub("[^A-Za-z0-9]", "", text))
    num_word = len(text.split())
    num_sent = len(sub("[^\\.]", "", text))
    ari_value = (4.71 * (num_char / num_word)) + (0.5 * (num_word / num_sent)) - 21.43
    return ceil(ari_value)
# Tests
print(test_ari("This is just a string.", -2))
print(test_ari("Semantic Publishing involves the use of Web and Semantic Web technologies and standards for the " \
"semantic enhancement of a scholarly work so as to improve its discoverability, interactivity, " \
"openness and (re-)usability for both humans and machines. Recently, people have suggested that " \
"the semantic enhancements of a scholarly work should be undertaken by the authors of that scholarly " \
"work, and should be considered as integral parts of the contribution subjected to peer review. " \
"However, this requires that the authors should spend additional time and effort adding such semantic " \
"annotations, time that they usually do not have available. Thus, the most pragmatic way to " \
"facilitate this additional task is to use automated services that create the semantic annotation " \
"of authors' scholarly articles by parsing the content that they have already written, thus reducing " \
"the additional time required of the authors to that for checking and validating these semantic " \
"annotations. In this article, I propose a generic approach called compositional and iterative " \
"semantic enhancement (CISE) that enables the automatic enhancement of scholarly papers with " \
"additional semantic annotations in a way that is independent of the markup used for storing " \
"scholarly articles and the natural language used for writing their content.", 25))
| [
"essepuntato@gmail.com"
] | essepuntato@gmail.com |
fe81555e1d4fc79957700c377446435804424705 | 052514e42a741be1371c74b7882f153d8056fb2c | /tutorial/settings.py | a3982bbfbf9ad8c19cffc53fc55b0ff1af356a0c | [] | no_license | zoleikha-mousavipak/tutorial | 1d1c6654bfe19bc293b7c6b03d56d670e1e9406f | bbef79a51b455cad8e342df406afb2f73cbd2234 | refs/heads/master | 2020-06-12T20:57:37.259970 | 2019-06-29T15:57:53 | 2019-06-29T15:57:53 | 194,423,007 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,239 | py | """
Django settings for tutorial project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '&!2o#j(y^djxgok2q+s#$^y4b@r9orhm+ey@3dm@k=j0p%u$v%'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'tutorial.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'tutorial.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 5
} | [
"zmpak2000@gmail.com"
] | zmpak2000@gmail.com |
ef560dbd8795f56d2d6af2242fdee943d48d5f56 | bc11e10521fa313d83011e77a2c31a0b6ed581af | /lib/rubyfox/server/data/lib/Lib/encodings/cp866.py | 518eede0adb67d4c85e94e435c85e8f824a51c7d | [
"MIT"
] | permissive | neopoly/rubyfox-server | f6f191c68dcc30b8c56d22c8209e4a69251f4f27 | 26d67687fc642111ef8d02507f2b567828bd1ebd | refs/heads/master | 2023-07-20T15:04:32.028192 | 2023-07-17T09:16:36 | 2023-07-17T09:33:20 | 6,457,322 | 3 | 4 | MIT | 2020-08-11T06:53:50 | 2012-10-30T13:06:32 | Python | UTF-8 | Python | false | false | 7,092 | py | """ Python Character Mapping Codec generated from 'CP866.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x0410, # CYRILLIC CAPITAL LETTER A
0x0081: 0x0411, # CYRILLIC CAPITAL LETTER BE
0x0082: 0x0412, # CYRILLIC CAPITAL LETTER VE
0x0083: 0x0413, # CYRILLIC CAPITAL LETTER GHE
0x0084: 0x0414, # CYRILLIC CAPITAL LETTER DE
0x0085: 0x0415, # CYRILLIC CAPITAL LETTER IE
0x0086: 0x0416, # CYRILLIC CAPITAL LETTER ZHE
0x0087: 0x0417, # CYRILLIC CAPITAL LETTER ZE
0x0088: 0x0418, # CYRILLIC CAPITAL LETTER I
0x0089: 0x0419, # CYRILLIC CAPITAL LETTER SHORT I
0x008a: 0x041a, # CYRILLIC CAPITAL LETTER KA
0x008b: 0x041b, # CYRILLIC CAPITAL LETTER EL
0x008c: 0x041c, # CYRILLIC CAPITAL LETTER EM
0x008d: 0x041d, # CYRILLIC CAPITAL LETTER EN
0x008e: 0x041e, # CYRILLIC CAPITAL LETTER O
0x008f: 0x041f, # CYRILLIC CAPITAL LETTER PE
0x0090: 0x0420, # CYRILLIC CAPITAL LETTER ER
0x0091: 0x0421, # CYRILLIC CAPITAL LETTER ES
0x0092: 0x0422, # CYRILLIC CAPITAL LETTER TE
0x0093: 0x0423, # CYRILLIC CAPITAL LETTER U
0x0094: 0x0424, # CYRILLIC CAPITAL LETTER EF
0x0095: 0x0425, # CYRILLIC CAPITAL LETTER HA
0x0096: 0x0426, # CYRILLIC CAPITAL LETTER TSE
0x0097: 0x0427, # CYRILLIC CAPITAL LETTER CHE
0x0098: 0x0428, # CYRILLIC CAPITAL LETTER SHA
0x0099: 0x0429, # CYRILLIC CAPITAL LETTER SHCHA
0x009a: 0x042a, # CYRILLIC CAPITAL LETTER HARD SIGN
0x009b: 0x042b, # CYRILLIC CAPITAL LETTER YERU
0x009c: 0x042c, # CYRILLIC CAPITAL LETTER SOFT SIGN
0x009d: 0x042d, # CYRILLIC CAPITAL LETTER E
0x009e: 0x042e, # CYRILLIC CAPITAL LETTER YU
0x009f: 0x042f, # CYRILLIC CAPITAL LETTER YA
0x00a0: 0x0430, # CYRILLIC SMALL LETTER A
0x00a1: 0x0431, # CYRILLIC SMALL LETTER BE
0x00a2: 0x0432, # CYRILLIC SMALL LETTER VE
0x00a3: 0x0433, # CYRILLIC SMALL LETTER GHE
0x00a4: 0x0434, # CYRILLIC SMALL LETTER DE
0x00a5: 0x0435, # CYRILLIC SMALL LETTER IE
0x00a6: 0x0436, # CYRILLIC SMALL LETTER ZHE
0x00a7: 0x0437, # CYRILLIC SMALL LETTER ZE
0x00a8: 0x0438, # CYRILLIC SMALL LETTER I
0x00a9: 0x0439, # CYRILLIC SMALL LETTER SHORT I
0x00aa: 0x043a, # CYRILLIC SMALL LETTER KA
0x00ab: 0x043b, # CYRILLIC SMALL LETTER EL
0x00ac: 0x043c, # CYRILLIC SMALL LETTER EM
0x00ad: 0x043d, # CYRILLIC SMALL LETTER EN
0x00ae: 0x043e, # CYRILLIC SMALL LETTER O
0x00af: 0x043f, # CYRILLIC SMALL LETTER PE
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x0440, # CYRILLIC SMALL LETTER ER
0x00e1: 0x0441, # CYRILLIC SMALL LETTER ES
0x00e2: 0x0442, # CYRILLIC SMALL LETTER TE
0x00e3: 0x0443, # CYRILLIC SMALL LETTER U
0x00e4: 0x0444, # CYRILLIC SMALL LETTER EF
0x00e5: 0x0445, # CYRILLIC SMALL LETTER HA
0x00e6: 0x0446, # CYRILLIC SMALL LETTER TSE
0x00e7: 0x0447, # CYRILLIC SMALL LETTER CHE
0x00e8: 0x0448, # CYRILLIC SMALL LETTER SHA
0x00e9: 0x0449, # CYRILLIC SMALL LETTER SHCHA
0x00ea: 0x044a, # CYRILLIC SMALL LETTER HARD SIGN
0x00eb: 0x044b, # CYRILLIC SMALL LETTER YERU
0x00ec: 0x044c, # CYRILLIC SMALL LETTER SOFT SIGN
0x00ed: 0x044d, # CYRILLIC SMALL LETTER E
0x00ee: 0x044e, # CYRILLIC SMALL LETTER YU
0x00ef: 0x044f, # CYRILLIC SMALL LETTER YA
0x00f0: 0x0401, # CYRILLIC CAPITAL LETTER IO
0x00f1: 0x0451, # CYRILLIC SMALL LETTER IO
0x00f2: 0x0404, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
0x00f3: 0x0454, # CYRILLIC SMALL LETTER UKRAINIAN IE
0x00f4: 0x0407, # CYRILLIC CAPITAL LETTER YI
0x00f5: 0x0457, # CYRILLIC SMALL LETTER YI
0x00f6: 0x040e, # CYRILLIC CAPITAL LETTER SHORT U
0x00f7: 0x045e, # CYRILLIC SMALL LETTER SHORT U
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x2116, # NUMERO SIGN
0x00fd: 0x00a4, # CURRENCY SIGN
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Encoding Map
encoding_map = codecs.make_encoding_map(decoding_map)
| [
"ps@neopoly.de"
] | ps@neopoly.de |
5efa166cf25d0fb4221bbc7c26dd5d295c38e203 | 52c3ef5ae2c86cbde6c46c7c8d225ddf165d0632 | /GBM/GBMSlidingwindows_V2/addRawMeanGlobalNorm.py | 05fa0500e35d68234d1972e2e38f7318a5c0d456 | [
"MIT"
] | permissive | joshlyman/TextureAnalysis | 7ae028584af6466cd96e207060a916611511d300 | bfbedbd53f62396fdef383408089b37e5ab511d0 | refs/heads/master | 2020-09-03T04:07:16.372160 | 2019-11-03T23:51:20 | 2019-11-03T23:51:20 | 219,382,186 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,673 | py | # updated Raw Mean and Std based on whole image normalization (Max and Min)
import os
import csv
import fnmatch
import SimpleITK
import numpy
rootDir = '/Users/yanzhexu/Desktop/Research/GBM/aCGH_whole_tumor_maps_for_Neuro-Onc_dataset'
texturesPath = '/Users/yanzhexu/Desktop/Research/Sliding box GBM/MyAlgorithm_V2/addYlabel/GBM_SlidingWindow_TextureMap'
outputpath = '/Users/yanzhexu/Desktop/Research/Sliding box GBM/MyAlgorithm_V2/AddGlobalNormedRawMeaninside/'
# outputpath = '/Users/yanzhexu/Desktop/Research/Sliding box GBM/MyAlgorithm_V2/TestWithoutNorm/'
def Read2DImage(fileName, rotateAngle=0):
    """Read the image at *fileName* via SimpleITK and return a 2-D array.

    ``rotateAngle`` is accepted for API compatibility but is currently
    unused by this implementation.

    NOTE(review): for a 3-D volume only index 0 along the first axis is
    returned -- presumably that axis is the slice axis for these DICOMs;
    confirm against the data.
    """
    rawImage = SimpleITK.ReadImage(fileName)
    imgArray = SimpleITK.GetArrayFromImage(rawImage)
    # Convert 3D Image to 2D
    if len(imgArray.shape) == 3:
        imgArray = imgArray[0, :, :]
    return imgArray
def Norm_Mean_Std_LargestBox(imgArray, imgMax, imgMin):
    """Rescale *imgArray* from [imgMin, imgMax] onto [0, 255] and return
    the (mean, std) of the rescaled values.

    NOTE(review): divides by (imgMax - imgMin); a constant image
    (imgMax == imgMin) raises ZeroDivisionError -- confirm callers never
    pass one.
    """
    span = imgMax - imgMin
    scaled = (imgArray - imgMin) * (255.0 / span)
    return numpy.mean(scaled), numpy.std(scaled)
# For every per-patient texture-map CSV, locate the matching patient/slice
# DICOM folder, then append globally-normalized raw-intensity statistics
# (computed from an 8x8 window around each row's coordinates) to each row.
for texturesfile in os.listdir(texturesPath):
    # Skip hidden entries.
    if texturesfile.startswith('.'):
        continue
    if texturesfile.startswith('..'):
        continue
    print texturesfile
    # File name convention: "<patient>_slice<N>_...", e.g. parsed below.
    patient = texturesfile.split('_')[0]
    slicenum = texturesfile.split('_')[1].split('slice')[1]
    texturesfilepath = os.path.join(texturesPath,texturesfile)
    for casefolder in os.listdir(rootDir):
        if casefolder.startswith('.'):
            continue
        if casefolder.startswith('..'):
            continue
        # Match the case folder whose name contains the patient id.
        if fnmatch.fnmatch(casefolder,'*'+str(patient)+'*'):
            print casefolder
            print '\n'
            slicefolder = 'slice'+str(slicenum)
            casefolderpath = os.path.join(rootDir,casefolder)
            slicefolderpath = os.path.join(casefolderpath,slicefolder)
            # Map image-contrast name -> DICOM file name for this slice.
            dcmfiledict = dict()
            for dcmfile in os.listdir(slicefolderpath):
                if dcmfile.startswith('.'):
                    continue
                if fnmatch.fnmatch(dcmfile, '*dcm*') is False:
                    continue
                if fnmatch.fnmatch(dcmfile, '*precontrast*'):
                    continue
                if fnmatch.fnmatch(dcmfile, '*C*SPGR*') or fnmatch.fnmatch(dcmfile, '*+C*T1*') or fnmatch.fnmatch(
                        dcmfile, '*T1*+C*'):
                    SPGRCfile = dcmfile
                    dcmfiledict['SPGRC'] = SPGRCfile
                if fnmatch.fnmatch(dcmfile, '*T2*'):
                    T2file = dcmfile
                    dcmfiledict['T2'] = T2file
                if fnmatch.fnmatch(dcmfile, '*q*'):
                    Qfile = dcmfile
                    dcmfiledict['Q'] = Qfile
                if fnmatch.fnmatch(dcmfile, '*p*'):
                    Pfile = dcmfile
                    dcmfiledict['P'] = Pfile
                if fnmatch.fnmatch(dcmfile, '*rCBV*'):
                    RCBVfile = dcmfile
                    dcmfiledict['RCBV'] = RCBVfile
                if fnmatch.fnmatch(dcmfile, '*EPI*+C*') or fnmatch.fnmatch(dcmfile, '*+C*EPI*'):
                    EPIfile = dcmfile
                    dcmfiledict['EPI'] = EPIfile
            # Rewrite the CSV, inserting two new columns at positions 42/43.
            outputfeaturefilepath = os.path.join(outputpath,texturesfile)
            with open(outputfeaturefilepath, 'wb') as writefile:
                featureWriter = csv.writer(writefile, dialect='excel')
                with open(texturesfilepath, 'r') as featuresfile:
                    # featuresfile.readline()
                    rowFile = csv.reader(featuresfile, delimiter=',')
                    for row in rowFile:
                        if row[0] == 'Image Contrast':
                            # Header row: insert the two new column titles.
                            row.insert(42,'Global Normalized Raw Mean')
                            row.insert(43,'Global Normalized Raw Std')
                            featureWriter.writerow(row)
                        else:
                            contrast = row[0]
                            dcmfile = dcmfiledict[contrast]
                            dcmfilepath = os.path.join(slicefolderpath,dcmfile)
                            # Columns 2/3 hold the window-center coordinates.
                            xcoord = int(row[2])
                            ycoord = int(row[3])
                            dicomImage = Read2DImage(dcmfilepath)
                            # 8x8 pixel window centered (ycoord, xcoord).
                            subImage = dicomImage[ycoord - 4:ycoord + 4, xcoord - 4:xcoord + 4]
                            # Normalize by the WHOLE image's max/min (global).
                            Raw_mean, Raw_std = Norm_Mean_Std_LargestBox(subImage, dicomImage.max(), dicomImage.min())
                            row.insert(42,Raw_mean)
                            row.insert(43,Raw_std)
                            featureWriter.writerow(row)
| [
"yanzhexu@asu.edu"
] | yanzhexu@asu.edu |
5ecb1beb5854b4ea5f97cb54834329219457f1d9 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-gsn-edf/gsn-edf_ut=2.5_rd=0.8_rw=0.06_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=25/sched.py | 39456379673b5e513262d6038623284df7dbac6f | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | -X FMLP -Q 0 -L 3 102 300
-X FMLP -Q 0 -L 3 100 400
-X FMLP -Q 0 -L 3 53 200
-X FMLP -Q 1 -L 2 47 300
-X FMLP -Q 1 -L 2 46 200
-X FMLP -Q 1 -L 2 40 200
-X FMLP -Q 2 -L 2 39 150
-X FMLP -Q 2 -L 2 39 200
-X FMLP -Q 3 -L 1 31 150
-X FMLP -Q 3 -L 1 22 250
17 150
17 200
16 150
| [
"ricardo.btxr@gmail.com"
] | ricardo.btxr@gmail.com |
1bfdc532a4f8ca1c7618fc2febf475737d4274c3 | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/program/model/util/AddressLabelInfo.pyi | 47dc2479e2d7c15df523627720cb7c02b0825660 | [
"MIT"
] | permissive | kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,732 | pyi | import ghidra.program.model.address
import ghidra.program.model.symbol
import ghidra.program.model.util
import java.lang
class AddressLabelInfo(object, java.lang.Comparable):
"""
AddressLabelInfo is a utility class for storing
an Address and a corresponding label or alias together.
"""
@overload
def __init__(self, addr: ghidra.program.model.address.Address):
"""
Constructs a new AddressLabelInfo object with only address information
@param addr the address to store in this object
"""
...
@overload
def __init__(self, s: ghidra.program.model.symbol.Symbol):
"""
Constructs a new AddressLabelInfo object
@param s symbol to initialize info from.
"""
...
@overload
def __init__(self, addr: ghidra.program.model.address.Address, label: unicode, isPrimary: bool, symbolSource: ghidra.program.model.symbol.SourceType): ...
@overload
def __init__(self, addr: ghidra.program.model.address.Address, label: unicode, isPrimary: bool, scope: ghidra.program.model.symbol.Namespace, symbolSource: ghidra.program.model.symbol.SourceType, isEntry: bool): ...
@overload
def __init__(self, addr: ghidra.program.model.address.Address, label: unicode, isPrimary: bool, scope: ghidra.program.model.symbol.Namespace, symbolSource: ghidra.program.model.symbol.SourceType, isEntry: bool, type: ghidra.program.model.util.ProcessorSymbolType): ...
@overload
def compareTo(self, info: ghidra.program.model.util.AddressLabelInfo) -> int: ...
@overload
def compareTo(self, __a0: object) -> int: ...
def equals(self, __a0: object) -> bool: ...
def getAddress(self) -> ghidra.program.model.address.Address:
"""
Returns the object's address.
"""
...
def getClass(self) -> java.lang.Class: ...
def getLabel(self) -> unicode:
"""
Returns the object's label or alias.
"""
...
def getProcessorSymbolType(self) -> ghidra.program.model.util.ProcessorSymbolType:
"""
Returns the type of processor symbol (if this was defined by a pspec) or null if this
is not a processor symbol or it was not specified in the pspec file. It basically allows
a pspec file to give more information about a symbol such as if code or a code pointer is
expected to be at the symbol's address.
@return the ProcesorSymbolType if it has one.
"""
...
def getScope(self) -> ghidra.program.model.symbol.Namespace:
"""
Returns the scope for the symbol.
"""
...
def getSource(self) -> ghidra.program.model.symbol.SourceType: ...
def hashCode(self) -> int: ...
def isEntry(self) -> bool: ...
def isPrimary(self) -> bool:
"""
Returns whether the object is the primary label at the address.
"""
...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def address(self) -> ghidra.program.model.address.Address: ...
@property
def entry(self) -> bool: ...
@property
def label(self) -> unicode: ...
@property
def primary(self) -> bool: ...
@property
def processorSymbolType(self) -> ghidra.program.model.util.ProcessorSymbolType: ...
@property
def scope(self) -> ghidra.program.model.symbol.Namespace: ...
@property
def source(self) -> ghidra.program.model.symbol.SourceType: ...
| [
"tsunekou1019@gmail.com"
] | tsunekou1019@gmail.com |
59ccb3889943b1db7267ae6e01d546a994d43164 | 05083d24088bbb3bfb7cdd162c101c72e18bc3a6 | /containers/restart/component-invalid-uri.py | c1ffbc055d0bec36aefe23271f0429e6b1d50802 | [
"Apache-2.0"
] | permissive | crossbario/crossbar-examples | f5e14b62db0f14e20ab54346cd4e8c3276aa6449 | aa31d9fe3abcb4b797931356b5a2ceeac64229c3 | refs/heads/master | 2023-01-11T02:36:00.883034 | 2023-01-03T11:12:06 | 2023-01-03T11:12:06 | 28,035,551 | 100 | 122 | Apache-2.0 | 2023-01-03T11:12:07 | 2014-12-15T12:23:02 | HTML | UTF-8 | Python | false | false | 4,301 | py | import os
import threading
import txaio
txaio.use_twisted()
from txaio import make_logger
from twisted.internet.defer import inlineCallbacks
from autobahn.wamp.types import CallDetails, RegisterOptions, EventDetails, SubscribeOptions
from autobahn.twisted.wamp import ApplicationSession
from autobahn.wamp.exception import InvalidUriError
INVALID_URI = "crossbarfabriccenter.node.{'oid': '4752c752-a128-4ae4-a041-84208eabe49d'}.get_docker_images"
class MyComponent(ApplicationSession):
log = make_logger()
    def __init__(self, config):
        """Record a "pid:thread-id" identity string and log construction."""
        self.ident = '{}:{}'.format(os.getpid(), threading.get_ident())
        self.log.info('{klass}[{ident}].__init__(config={config})',
                      klass=self.__class__.__name__, ident=self.ident, config=str(config))
        ApplicationSession.__init__(self, config)
    @inlineCallbacks
    def onJoin(self, details):
        """Exercise register/call/subscribe/publish against an invalid URI.

        Each of the four WAMP operations is expected to raise
        InvalidUriError for INVALID_URI; any other exception -- or no
        exception at all -- is logged as an error. The session leaves the
        realm once all four checks have run.
        """
        self.log.info('{klass}[{ident}].onJoin(details={details})',
                      klass=self.__class__.__name__, ident=self.ident, details=details)
        # REGISTER
        try:
            yield self.register(self.echo, INVALID_URI, options=RegisterOptions(invoke='roundrobin'))
        except Exception as e:
            self.log.failure()
            if isinstance(e, InvalidUriError):
                self.log.info('OK: REGISTER got expected exception InvalidUriError!')
            else:
                self.log.warn('ERROR: REGISTER got unexpected exception "{err}"', err=str(e))
        else:
            self.log.warn('ERROR: REGISTER expected a InvalidUriError exception - but got none! ')
        # CALL
        try:
            yield self.call(INVALID_URI, b'\xff' * 16)
        except Exception as e:
            self.log.failure()
            if isinstance(e, InvalidUriError):
                self.log.info('OK: CALL got expected exception InvalidUriError!')
            else:
                self.log.warn('ERROR: CALL got unexpected exception "{err}"', err=str(e))
        else:
            self.log.warn('ERROR: CALL expected a InvalidUriError exception - but got none! ')
        # SUBSCRIBE
        try:
            yield self.subscribe(self.on_topic1, INVALID_URI, options=SubscribeOptions(details=True))
        except Exception as e:
            self.log.failure()
            if isinstance(e, InvalidUriError):
                self.log.info('OK: SUBSCRIBE got expected exception InvalidUriError!')
            else:
                self.log.warn('ERROR: SUBSCRIBE got unexpected exception "{err}"', err=str(e))
        else:
            self.log.warn('ERROR: SUBSCRIBE expected a InvalidUriError exception - but got none! ')
        # PUBLISH
        try:
            yield self.publish(INVALID_URI, b'\xff' * 16)
        except Exception as e:
            self.log.failure()
            if isinstance(e, InvalidUriError):
                self.log.info('OK: PUBLISH got expected exception InvalidUriError!')
            else:
                self.log.warn('ERROR: PUBLISH got unexpected exception "{err}"', err=str(e))
        else:
            self.log.warn('ERROR: PUBLISH expected a InvalidUriError exception - but got none! ')
        # OK, all done!
        yield self.leave()
    def echo(self, data, details=None):
        """Registered WAMP procedure: return *data* (bytes) unchanged."""
        assert type(data) == bytes, '"data" must be bytes, but was {}'.format(type(data))
        assert details is None or isinstance(details, CallDetails), '"details" must be CallDetails, but was {}'.format(type(details))
        self.log.info('{klass}[{ident}].echo(data={dlen}, details={details}): echo return {dlen} bytes',
                      klass=self.__class__.__name__,
                      ident=self.ident,
                      details=details,
                      dlen=len(data))
        return data
    def on_topic1(self, data, details=None):
        """Subscription handler: validate the payload type and log the event."""
        assert type(data) == bytes, '"data" must be bytes, but was {}'.format(type(data))
        assert details is None or isinstance(details, EventDetails), '"details" must be EventDetails, but was {}'.format(type(details))
        self.log.info('{klass}[{ident}].on_topic1(data={dlen}, details={details})',
                      klass=self.__class__.__name__,
                      ident=self.ident,
                      details=details,
                      dlen=len(data))
| [
"tobias.oberstein@crossbario.com"
] | tobias.oberstein@crossbario.com |
a8a1a4dc8c8273f814e2058d488401d7146c8e5a | b0cc5ffd6cba61367ceb23acace49a2cecb51c60 | /test_appium0102/test_demo.py | 715414d6d7307f99b038b635cee0c4481a11352f | [] | no_license | tangsong41/hogwarts | eef7c4b978430ea528a55939559aeb2b843954c3 | d00091d5ea42bd57526788c4aab5f7f47ca7ae24 | refs/heads/main | 2023-02-17T01:42:48.966217 | 2021-01-12T01:51:03 | 2021-01-12T01:51:03 | 319,966,726 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,078 | py | # -*- coding: utf-8 -*-
"""
@author:tangsong
@file: test_demo.py
@time: 2020/12/31
"""
from appium import webdriver
class testDemo:
def setup(self):
desired_caps = {}
desired_caps['platformName'] = 'Android'
desired_caps['platformVersion'] = '6.0'
desired_caps['deviceName'] = '127.0.0.1:7555'
desired_caps['appPackage'] = 'com.android.settings'
desired_caps['appActivity'] = 'com.android.settings.Settings'
self.driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub/sessions', desired_caps)
    def teardown(self):
        # Close the Appium session after each test.
        self.driver.quit()
    def test_demo(self):
        """Recorded walk-through of the Android Settings app.

        The absolute XPath locators below were captured from one specific
        device layout (presumably the 127.0.0.1:7555 emulator -- confirm)
        and are layout-fragile by nature.
        """
        el7 = self.driver.find_element_by_xpath(
            "/hierarchy/android.widget.FrameLayout/android.view.ViewGroup/android.widget.FrameLayout[2]/android.widget.FrameLayout/android.widget.ScrollView/android.widget.LinearLayout/android.widget.LinearLayout[2]/android.view.ViewGroup/android.widget.FrameLayout[2]/android.widget.LinearLayout/android.widget.LinearLayout/android.widget.LinearLayout")
        el7.click()
        el8 = self.driver.find_element_by_xpath(
            "/hierarchy/android.widget.FrameLayout/android.view.ViewGroup/android.widget.FrameLayout[2]/android.widget.LinearLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.ListView/android.widget.LinearLayout[3]/android.widget.RelativeLayout")
        el8.click()
        # Confirm the dialog via the standard Android positive button.
        el9 = self.driver.find_element_by_id("android:id/button1")
        el9.click()
        el10 = self.driver.find_element_by_xpath(
            "/hierarchy/android.widget.FrameLayout/android.view.ViewGroup/android.widget.FrameLayout[2]/android.widget.LinearLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.ListView/android.widget.LinearLayout[5]/android.widget.RelativeLayout/android.widget.TextView")
        el10.click()
        # Navigate back up via the accessibility id of the "up" button.
        el11 = self.driver.find_element_by_accessibility_id("向上导航")
        el11.click()
| [
"369223985@qq.com"
] | 369223985@qq.com |
d8b17d0bb37eb63334d10f716f8a6144759eaa62 | 40132307c631dccbf7aa341eb308f69389715c73 | /OLD/idmt/maya/Pluto/PlutoGun/temp/xx.py | 0af794bcc9ab3039c03a9b7d5692a0031244de61 | [] | no_license | Bn-com/myProj_octv | be77613cebc450b1fd6487a6d7bac991e3388d3f | c11f715996a435396c28ffb4c20f11f8e3c1a681 | refs/heads/master | 2023-03-25T08:58:58.609869 | 2021-03-23T11:17:13 | 2021-03-23T11:17:13 | 348,676,742 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,423 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading myuis file 'Z:/Resource/Support/Python/2.6-x64/Lib/site-packages/idmt/maya/Pluto/PlutoGun/xx.myuis'
#
# Created: Fri Dec 21 17:43:47 2012
# by: PyQt4 UI code generator 4.9.5
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_MainWindow(object):
    # Auto-generated by pyuic4 from xx.myuis (see the header warning):
    # regenerating the UI will overwrite any manual edits to this class.
    def setupUi(self, MainWindow):
        # Build the widget tree: a central 3x3 QTableWidget inside a
        # vertical layout, plus a menu bar and a status bar.
        MainWindow.setObjectName(_fromUtf8("MainWindow"))
        MainWindow.resize(446, 508)
        self.centralwidget = QtGui.QWidget(MainWindow)
        self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
        self.verticalLayout = QtGui.QVBoxLayout(self.centralwidget)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        self.tableWidget = QtGui.QTableWidget(self.centralwidget)
        self.tableWidget.setObjectName(_fromUtf8("tableWidget"))
        self.tableWidget.setColumnCount(3)
        self.tableWidget.setRowCount(3)
        # Placeholder header/cell items; their visible text is assigned in
        # retranslateUi so it can be localized.
        item = QtGui.QTableWidgetItem()
        self.tableWidget.setVerticalHeaderItem(0, item)
        item = QtGui.QTableWidgetItem()
        self.tableWidget.setVerticalHeaderItem(1, item)
        item = QtGui.QTableWidgetItem()
        self.tableWidget.setVerticalHeaderItem(2, item)
        item = QtGui.QTableWidgetItem()
        self.tableWidget.setHorizontalHeaderItem(0, item)
        item = QtGui.QTableWidgetItem()
        self.tableWidget.setHorizontalHeaderItem(1, item)
        item = QtGui.QTableWidgetItem()
        self.tableWidget.setHorizontalHeaderItem(2, item)
        item = QtGui.QTableWidgetItem()
        self.tableWidget.setItem(0, 0, item)
        item = QtGui.QTableWidgetItem()
        self.tableWidget.setItem(1, 0, item)
        item = QtGui.QTableWidgetItem()
        self.tableWidget.setItem(2, 0, item)
        self.verticalLayout.addWidget(self.tableWidget)
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtGui.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 446, 23))
        self.menubar.setObjectName(_fromUtf8("menubar"))
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtGui.QStatusBar(MainWindow)
        self.statusbar.setObjectName(_fromUtf8("statusbar"))
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        # Any selection change maximizes the table (wired in Designer).
        QtCore.QObject.connect(self.tableWidget, QtCore.SIGNAL(_fromUtf8("itemSelectionChanged()")), self.tableWidget.showMaximized)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        # Apply translatable text to the window title, headers and cells.
        MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "MainWindow", None, QtGui.QApplication.UnicodeUTF8))
        item = self.tableWidget.verticalHeaderItem(0)
        item.setText(QtGui.QApplication.translate("MainWindow", "新建行", None, QtGui.QApplication.UnicodeUTF8))
        item = self.tableWidget.verticalHeaderItem(1)
        item.setText(QtGui.QApplication.translate("MainWindow", "a", None, QtGui.QApplication.UnicodeUTF8))
        item = self.tableWidget.verticalHeaderItem(2)
        item.setText(QtGui.QApplication.translate("MainWindow", "c", None, QtGui.QApplication.UnicodeUTF8))
        item = self.tableWidget.horizontalHeaderItem(0)
        item.setText(QtGui.QApplication.translate("MainWindow", "name", None, QtGui.QApplication.UnicodeUTF8))
        item = self.tableWidget.horizontalHeaderItem(1)
        item.setText(QtGui.QApplication.translate("MainWindow", "vis", None, QtGui.QApplication.UnicodeUTF8))
        item = self.tableWidget.horizontalHeaderItem(2)
        item.setText(QtGui.QApplication.translate("MainWindow", "speed", None, QtGui.QApplication.UnicodeUTF8))
        # Sorting is suspended while cell text is rewritten, then restored.
        __sortingEnabled = self.tableWidget.isSortingEnabled()
        self.tableWidget.setSortingEnabled(False)
        item = self.tableWidget.item(0, 0)
        item.setText(QtGui.QApplication.translate("MainWindow", "x", None, QtGui.QApplication.UnicodeUTF8))
        item = self.tableWidget.item(1, 0)
        item.setText(QtGui.QApplication.translate("MainWindow", "x", None, QtGui.QApplication.UnicodeUTF8))
        item = self.tableWidget.item(2, 0)
        item.setText(QtGui.QApplication.translate("MainWindow", "x", None, QtGui.QApplication.UnicodeUTF8))
        self.tableWidget.setSortingEnabled(__sortingEnabled)
| [
"snakelonely@outlook.com"
] | snakelonely@outlook.com |
ee118db85a71932cd427bf3a4c40687623546892 | 916480ae24345193efa95df013f637e0a115653b | /web/transiq/broker/migrations/0005_auto_20180425_0052.py | 61b2758886a998f8cdd0fcbb2c8b121138260bc5 | [
"Apache-2.0"
] | permissive | manibhushan05/tms | 50e289c670e1615a067c61a051c498cdc54958df | 763fafb271ce07d13ac8ce575f2fee653cf39343 | refs/heads/master | 2022-12-11T07:59:30.297259 | 2021-09-08T03:24:59 | 2021-09-08T03:24:59 | 210,017,184 | 0 | 0 | Apache-2.0 | 2022-12-08T02:35:01 | 2019-09-21T16:23:57 | Python | UTF-8 | Python | false | false | 865 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-04-25 00:52
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: add the M2M ``aaho_offices`` to Broker and make the
    existing ``aaho_office`` FK nullable/blank."""
    dependencies = [
        ('utils', '0015_auto_20180417_1922'),
        ('broker', '0004_auto_20180424_1909'),
    ]
    operations = [
        migrations.AddField(
            model_name='broker',
            name='aaho_offices',
            field=models.ManyToManyField(blank=True, related_name='broker_aaho_offices', to='utils.AahoOffice'),
        ),
        migrations.AlterField(
            model_name='broker',
            name='aaho_office',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='broker_aaho_office', to='utils.AahoOffice'),
        ),
    ]
| [
"mani@myhost.local"
] | mani@myhost.local |
18b706f303d4ce45bf9432ccf2440b871ae63d13 | 86cd22354f2431087c9b3ff06188f071afb3eb72 | /1155. Number of Dice Rolls With Target Sum.py | 380fb3242114c123ec73b41c55bf224c9a13eaad | [] | no_license | tlxxzj/leetcode | 0c072a74d7e61ef4700388122f2270e46c4ac22e | 06dbf4f5b505a6a41e0d93367eedd231b611a84b | refs/heads/master | 2023-08-31T11:04:34.585532 | 2023-08-31T08:25:51 | 2023-08-31T08:25:51 | 94,386,828 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | class Solution:
def numRollsToTarget(self, d: int, f: int, target: int) -> int:
dp = [0]*(target+1)
dp[0] = 1
for i in range(d):
for j in range(target, i, -1):
dp[j] = 0
for k in range(1, f+1):
if j-k >=i:
dp[j] += dp[j-k]
dp[j] %= 1000000007
return dp[target]
| [
"tlxxzj@qq.com"
] | tlxxzj@qq.com |
272b9d1172d3e7242c697639af070ac9d194d29b | 181af10fcf40b824fe92d3b8f72fd15d6d1490c2 | /Contests/201-300/week 259/2013. Detect Squares/Detect Squares.py | 2290cee67baacc45ca4bb735f9f9646d052e93a9 | [] | no_license | wangyendt/LeetCode | 402c59a0b7b7f5b3a672231ea5dad8056ade36af | 4a3ba15284c45b2d8bf38306c8c8526ae174615c | refs/heads/master | 2023-08-10T06:27:54.995152 | 2023-08-10T02:22:27 | 2023-08-10T02:22:27 | 176,651,399 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,221 | py | #!/usr/bin/env python
# -*- coding:utf-8 _*-
"""
@author: wangye(Wayne)
@license: Apache Licence
@file: Detect Squares.py
@time: 2021/09/19
@contact: wang121ye@hotmail.com
@site:
@software: PyCharm
# code is far away from bugs.
"""
from typing import *
import collections
class DetectSquares:
    """Point multiset that counts axis-aligned squares a query point
    completes with previously added points (points may repeat)."""

    def __init__(self):
        # Multiplicity of every point ever added.
        self.counts = collections.Counter()
        # For each y, the set of x-coordinates present in that row.
        self.row_xs = collections.defaultdict(set)

    def add(self, point: List[int]) -> None:
        x, y = point
        self.counts[(x, y)] += 1
        self.row_xs[y].add(x)

    def count(self, point: List[int]) -> int:
        x, y = point
        total = 0
        # Every added point sharing the query's row fixes a candidate side.
        for x2 in self.row_xs[y]:
            side = x - x2
            if side == 0:
                continue  # degenerate: zero-area squares don't count
            # The square can extend upward or downward from the shared row.
            for y2 in (y + side, y - side):
                total += (self.counts[(x2, y)]
                          * self.counts[(x2, y2)]
                          * self.counts[(x, y2)])
        return total
| [
"905317742@qq.com"
] | 905317742@qq.com |
3837b37dddc427ad588cb01cd3ec61947c588c9a | 6413fe58b04ac2a7efe1e56050ad42d0e688adc6 | /tempenv/lib/python3.7/site-packages/plotly/validators/histogram/_cumulative.py | dd8bbaddfd43ef0a0302a4293f9011148ea3c2c3 | [
"MIT"
] | permissive | tytechortz/Denver_temperature | 7f91e0ac649f9584147d59193568f6ec7efe3a77 | 9d9ea31cd7ec003e8431dcbb10a3320be272996d | refs/heads/master | 2022-12-09T06:22:14.963463 | 2019-10-09T16:30:52 | 2019-10-09T16:30:52 | 170,581,559 | 1 | 0 | MIT | 2022-06-21T23:04:21 | 2019-02-13T21:22:53 | Python | UTF-8 | Python | false | false | 1,927 | py | import _plotly_utils.basevalidators
class CumulativeValidator(_plotly_utils.basevalidators.CompoundValidator):
    # Auto-generated validator for the `histogram.cumulative` compound
    # attribute; `data_docs` below is the user-facing description of its
    # sub-attributes and is consumed at runtime -- do not edit its text.
    def __init__(
        self, plotly_name='cumulative', parent_name='histogram', **kwargs
    ):
        super(CumulativeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop('data_class_str', 'Cumulative'),
            data_docs=kwargs.pop(
                'data_docs', """
            currentbin
                Only applies if cumulative is enabled. Sets
                whether the current bin is included, excluded,
                or has half of its value included in the
                current cumulative value. "include" is the
                default for compatibility with various other
                tools, however it introduces a half-bin bias to
                the results. "exclude" makes the opposite half-
                bin bias, and "half" removes it.
            direction
                Only applies if cumulative is enabled. If
                "increasing" (default) we sum all prior bins,
                so the result increases from left to right. If
                "decreasing" we sum later bins so the result
                decreases from left to right.
            enabled
                If true, display the cumulative distribution by
                summing the binned values. Use the `direction`
                and `centralbin` attributes to tune the
                accumulation method. Note: in this mode, the
                "density" `histnorm` settings behave the same
                as their equivalents without "density": "" and
                "density" both rise to the number of data
                points, and "probability" and *probability
                density* both rise to the number of sample
                points.
"""
            ),
            **kwargs
        )
| [
"jmswank7@gmail.com"
] | jmswank7@gmail.com |
d3ab238fe5245288e6a1241eca69f639cc2c4041 | 133e8c9df1d1725d7d34ea4317ae3a15e26e6c66 | /pythonlearn/Class/Advanced-Features/1.1.Generator.py | b1308537310c36ef27afb0fba7b9d3e84a6223b4 | [
"Apache-2.0"
] | permissive | 425776024/Learn | dfa8b53233f019b77b7537cc340fce2a81ff4c3b | 3990e75b469225ba7b430539ef9a16abe89eb863 | refs/heads/master | 2022-12-01T06:46:49.674609 | 2020-06-01T08:17:08 | 2020-06-01T08:17:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,213 | py |
import time
from collections import deque # 1.
class Dispatcher(object):
    """Cooperative round-robin scheduler for generator-based tasks."""

    def __init__(self, tasks):
        # Pending tasks; taken from the right end, re-queued on the left.
        self.tasks = deque(tasks)

    def next(self):
        # Fetch the task whose turn it is.
        return self.tasks.pop()

    def run(self):
        # Give every task one resumption per turn until all are exhausted.
        while self.tasks:
            task = self.next()
            try:
                next(task)
            except StopIteration:
                continue  # finished task is simply dropped
            self.tasks.appendleft(task)
def greeting(obj, times):
    """Generator task: yields control *times* times and prints a numbered
    greeting for *obj* after each resumption."""
    for count in range(times):
        yield
        print("Hello, %s: %d!" % (obj, count + 1))
# Demo: three greeting tasks interleaved round-robin by the dispatcher;
# the expected interleaved output is listed in the comments below.
dispatcher = Dispatcher([
    greeting('work1', 5),
    greeting('work2', 4),
    greeting('work3', 6),
])
dispatcher.run()
# Hello, work3: 1!
# Hello, work2: 1!
# Hello, work1: 1!
# Hello, work3: 2!
# Hello, work2: 2!
# Hello, work1: 2!
# Hello, work3: 3!
# Hello, work2: 3!
# Hello, work1: 3!
# Hello, work3: 4!
# Hello, work2: 4!
# Hello, work1: 4!
# Hello, work3: 5!
# Hello, work1: 5!
# Hello, work3: 6! | [
"cheng.yang@salezoom.io"
] | cheng.yang@salezoom.io |
db09bf8bb97fd12ceb4f932e902a616242bf72e8 | 2bba4782f9085d2c0c324f6668709a81e482e095 | /secao07/ex32.py | b2a68bcb077573982bff6a74836074dc8dc0afd4 | [] | no_license | Saccha/Exercicios_Python | 886ae07392f006226688b8817bf17a7a52020ef9 | e54203cb8754180f0fe120ee60c462c2e74c86e3 | refs/heads/main | 2023-04-18T06:44:42.243579 | 2021-04-18T03:49:30 | 2021-04-18T03:49:30 | 346,230,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,300 | py | """
32.Leia dois vetores de inteiros x e y, cada um com 5 elementos (assuma
que o usuário não informa elementos repetidos). Calcule e mostre os
vetores resultantes em cada caso abaixo:
. Soma entre x e y: soma de cada elemento x com o elemento da mesma
posição em y.
. Produto entre x e y: multiplicação de cada elemento de x com o
elemento da mesma posição em y.
. Diferença entre x e y: todos os elementos de x que não existam em y.
. Interseção entre x e y: apenas os elementos que aparecem nos dois vetores
. União entre x e y: todos os elemntos de x, e todos os elementos de y
que não estão em x.
"""
x = []
y = []
for i in range(5):
    x.append(int(input("Digite um elemento do vetor x: ")))
for i in range(5):
    y.append(int(input("Digite um elemento do vetor y: ")))

# Element-wise sum and product vectors (exercise items a and b).
# The original computed `sum(cojuntox) + (conjuntoy)` -- a NameError/
# TypeError -- and a single scalar product instead of the required vector.
soma = [x[i] + y[i] for i in range(5)]
produto = [x[i] * y[i] for i in range(5)]

# Set-based results (items c, d and e). The statement guarantees no
# repeated elements, so converting to sets loses no information.
# The original mixed the names `conjuntox` and `conjunto_x` (NameError).
conjunto_x = set(x)
conjunto_y = set(y)
diferenca = conjunto_x.difference(conjunto_y)
intersecao = conjunto_x.intersection(conjunto_y)
uniao = conjunto_x.union(conjunto_y)

print(f"\nSoma entre x e y: {soma}")
print(f"Produto entre x e y: {produto}")
print(f"Diferença entre x e y: {diferenca}")
print(f"Interseção entre x e y: {intersecao}")
print(f"União entre x e y: {uniao}")
| [
"noreply@github.com"
] | Saccha.noreply@github.com |
961155531e12ba7806368a3811fb1b54547d88d7 | 0158a2c57f964f36a2970d060617baf423f5eaa8 | /test/examples100/example13.py | 1e2d8b911de9cbaa9ede7bf68665721f3aacdb47 | [] | no_license | wangzhicheng2013/python | c835f82cfedf4e6e8494cbc1d35681c3109adf1a | ce7d5ea7992964df583e78fa57ee1203c2f6e26d | refs/heads/master | 2022-12-09T08:18:10.112515 | 2019-04-19T12:07:08 | 2019-04-19T12:07:08 | 155,081,543 | 0 | 0 | null | 2022-11-23T21:11:46 | 2018-10-28T14:44:41 | Python | UTF-8 | Python | false | false | 209 | py | import sys
def func(number):
    """Return the sum of the cubes of the digits of a 3-digit number.

    A number equal to this sum is a narcissistic (Armstrong) number.
    Converted from Python 2 (`print i`, classic `/` division) to
    Python 3 (`//`, `print()`); results are unchanged for 3-digit ints.
    """
    hundreds = number // 100
    tens = number // 10 % 10
    units = number % 10
    return hundreds ** 3 + tens ** 3 + units ** 3

# Print every 3-digit narcissistic number (153, 370, 371, 407).
for i in range(100, 1000):
    if i == func(i):
        print(i)
| [
"root@ubuntu.ubuntu-domain"
] | root@ubuntu.ubuntu-domain |
35743bbcf8443bf7eb88f8efb40b61452e8ef1ec | 6d327a390d007ea20b17977a317e116856c5c768 | /src/sllintra/theme/tests/test_functional.py | 7e7cd4ac493a31a9a33551113276b17010ed1f3e | [] | no_license | taito-zz/sllintra.theme | fae565e6f5ff59d02d0082d769c970d432f66ec8 | b7e99454c54094941eca3aec3d1d0bb0d14ed265 | refs/heads/master | 2021-05-30T07:27:32.884147 | 2015-08-10T18:19:01 | 2015-08-10T18:19:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,100 | py | from hexagonit.testing.browser import Browser
from plone.app.testing import TEST_USER_ID
from plone.app.testing import TEST_USER_NAME
from plone.app.testing import TEST_USER_PASSWORD
from plone.app.testing import setRoles
from plone.testing import layered
from sllintra.theme.tests.base import FUNCTIONAL_TESTING
from zope.testing import renormalizing
import doctest
import manuel.codeblock
import manuel.doctest
import manuel.testing
import re
import transaction
import unittest
FLAGS = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS | doctest.REPORT_NDIFF | doctest.REPORT_ONLY_FIRST_FAILURE
CHECKER = renormalizing.RENormalizing([
# Normalize the generated UUID values to always compare equal.
(re.compile(r'[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}'), '<UUID>'),
])
def setUp(self):
    """Prepare the doctest globals: a test browser, the portal, the test
    user's credentials, and Manager rights for the test user."""
    layer = self.globs['layer']
    browser = Browser(layer['app'])
    portal = layer['portal']
    # Update global variables within the tests.
    self.globs.update({
        'TEST_USER_NAME': TEST_USER_NAME,
        'TEST_USER_PASSWORD': TEST_USER_PASSWORD,
        'browser': browser,
        'portal': portal,
    })
    browser.setBaseUrl(portal.absolute_url())
    browser.handleErrors = True
    # Don't let the error log swallow exceptions raised during requests.
    portal.error_log._ignored_exceptions = ()
    setRoles(portal, TEST_USER_ID, ['Manager'])
    # Commit so the changes are visible to the test browser's requests.
    transaction.commit()
def DocFileSuite(testfile, flags=FLAGS, setUp=setUp, layer=FUNCTIONAL_TESTING):
    """Returns a test suite configured with a test layer.
    :param testfile: Path to a doctest file.
    :type testfile: str
    :param flags: Doctest test flags.
    :type flags: int
    :param setUp: Test set up function.
    :type setUp: callable
    :param layer: Test layer
    :type layer: object
    :rtype: `manuel.testing.TestSuite`
    """
    # Doctest parsing (with the UUID-normalizing checker) plus support
    # for manuel ::-style code blocks in the same file.
    m = manuel.doctest.Manuel(optionflags=flags, checker=CHECKER)
    m += manuel.codeblock.Manuel()
    return layered(
        manuel.testing.TestSuite(m, testfile, setUp=setUp, globs=dict(layer=layer)),
        layer=layer)
def test_suite():
    """Collect this package's functional doctests for the test runner."""
    return unittest.TestSuite([DocFileSuite('functional/doctype.txt')])
| [
"taito.horiuchi@gmail.com"
] | taito.horiuchi@gmail.com |
405b146f50c9dd13caf90aef5b307457ae893531 | 7949f96ee7feeaa163608dbd256b0b76d1b89258 | /toontown/coghq/MintRoomBase.py | e60a8dfda7c30df30392dd663def6bc7dad4ba47 | [] | no_license | xxdecryptionxx/ToontownOnline | 414619744b4c40588f9a86c8e01cb951ffe53e2d | e6c20e6ce56f2320217f2ddde8f632a63848bd6b | refs/heads/master | 2021-01-11T03:08:59.934044 | 2018-07-27T01:26:21 | 2018-07-27T01:26:21 | 71,086,644 | 8 | 10 | null | 2018-06-01T00:13:34 | 2016-10-17T00:39:41 | Python | UTF-8 | Python | false | false | 713 | py | # File: t (Python 2.4)
from toontown.toonbase import ToontownGlobals
class MintRoomBase:
    # Base class for mint rooms (decompiled source -- see the "File: t
    # (Python 2.4)" header): stores the mint id, the room id, and the cog
    # department track derived from the mint's zone id.
    def __init__(self):
        pass
    def setMintId(self, mintId):
        # Derive and cache the cog department ("track") for this mint.
        self.mintId = mintId
        self.cogTrack = ToontownGlobals.cogHQZoneId2dept(mintId)
    def setRoomId(self, roomId):
        self.roomId = roomId
    def getCogTrack(self):
        # Valid only after setMintId has been called.
        return self.cogTrack
if __dev__:
    # NOTE(review): decompiler artifact -- this `def` takes `self` but sits
    # at module level, and `EntityTypeRegistry = EntityTypeRegistry` is
    # presumably the residue of a from-import (likely from otp.level);
    # confirm against the original source before relying on this branch.
    def getEntityTypeReg(self):
        import FactoryEntityTypes as FactoryEntityTypes
        EntityTypeRegistry = EntityTypeRegistry
        import otp.level
        typeReg = EntityTypeRegistry.EntityTypeRegistry(FactoryEntityTypes)
        return typeReg
| [
"fr1tzanatore@aol.com"
] | fr1tzanatore@aol.com |
12ecdd5eb0b0958d7c55bebdc3501b320b2a18b4 | 201113fb5d1a4db7b36f6a159d6700685c81a5f1 | /stepik_data_structures/brackets.py | 969f7be005cfcf84d05a4b06c26e53f05a110dc3 | [] | no_license | Leoberium/CS | abdbc26e39e8d02a96d5ea56cf430ddc54d22d42 | b283a3beeb40ebca5258adb5336e9650d9ef3fac | refs/heads/master | 2020-12-28T10:52:41.826510 | 2020-11-28T13:37:15 | 2020-11-28T13:37:15 | 238,299,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 700 | py | import sys
def balanced(s):
    """Check bracket balance of *s*.

    Returns 'Success' when every (), [], {} pair matches; otherwise the
    1-based index of the first offending character: an unmatched or
    mismatched closer, or (if all closers matched) the first unmatched
    opener.
    """
    pairs = {']': '[', '}': '{', ')': '('}
    open_positions = []  # indices of currently unmatched openers
    for pos, ch in enumerate(s):
        if ch in '([{':
            open_positions.append(pos)
        elif ch in ')]}':
            # Fails both when nothing is open and when the top opener
            # doesn't pair with this closer.
            if not open_positions or s[open_positions[-1]] != pairs[ch]:
                return pos + 1
            open_positions.pop()
    return 'Success' if not open_positions else open_positions[0] + 1
def main():
    # Read one line from stdin and report whether its brackets balance
    # ('Success' or the 1-based index of the first offending character).
    s = sys.stdin.readline().strip()
    print(balanced(s))
if __name__ == '__main__':
    main()
| [
"leo.mazaev@gmail.com"
] | leo.mazaev@gmail.com |
07bfed36ee02e1066de8ca94b1054c2352c4b9e1 | 021c3da6bc9416185732120c9dc11a8bbfde9170 | /db_repository/manage.py | 0d55335168b5208db65f82083cdf32f9f1e691f1 | [
"Apache-2.0"
] | permissive | w940853815/weixin-robot | ed396cf59ad01f7d6223f603f85631411d79d7fe | 73b7b447241c1a74a14b21c6c11fc652b30f7ebb | refs/heads/master | 2021-01-19T08:12:42.933388 | 2018-01-12T03:12:48 | 2018-01-12T03:12:48 | 87,611,249 | 15 | 1 | null | 2018-01-12T03:12:49 | 2017-04-08T05:44:41 | JavaScript | UTF-8 | Python | false | false | 168 | py | #!/usr/bin/env python
from migrate.versioning.shell import main
if __name__ == '__main__':
main(six='<module 'six' from 'C:\Python27\lib\site-packages\six.pyc'>')
| [
"ruidong.wang@tsingdata.com"
] | ruidong.wang@tsingdata.com |
b975490442e071e5afdc29656ce1bb994fe91cb4 | 5fb8c6df1b2745b4a179a58ed1b6da823979efa2 | /tests/test_buttons.py | cf64731f1f1b0653160bc96f069f819156f38567 | [
"MIT"
] | permissive | sametz/uw_dnmr | b96f9669fae7798a16bf2081c1235975dc8b3cf8 | ad8fca28f37bfe3d3cdb17a054295b2068aed36e | refs/heads/master | 2023-05-12T16:14:14.012825 | 2020-05-06T01:15:07 | 2020-05-06T01:15:07 | 106,963,746 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,133 | py | from PySide2 import QtCore
from PySide2.QtCore import Slot as pyqtSlot
from PySide2.QtWidgets import QRadioButton
from uw_dnmr.view.widgets.buttons import ABC_ButtonGroup, MultipletButtonGroup
@pyqtSlot(QRadioButton)
def printslot(button):
    # Slot used by the tests below: echoes the clicked button's object
    # name to stdout so capsys can assert on it.
    print(button.objectName())
def test_nspins(qtbot, capsys):
    """Clicking every ABC_ButtonGroup button fires the group's
    buttonClicked signal with that button."""
    group = ABC_ButtonGroup()
    group.buttongroup.buttonClicked.connect(printslot)
    group.show()
    qtbot.addWidget(group)
    for button in group.buttons.values():
        qtbot.mouseClick(button, QtCore.Qt.LeftButton)
        out = capsys.readouterr().out
        assert out == button.objectName() + '\n'
def test_multiplet(qtbot, capsys):
    """Clicking each multiplet radio button fires the group's
    buttonClicked signal with that button."""
    group = MultipletButtonGroup()
    group.buttongroup.buttonClicked.connect(printslot)
    group.show()
    qtbot.addWidget(group)
    buttons = (group.AB_button, group.AB2_button, group.ABX_button,
               group.ABX3_button, group.AAXX_button,
               group.firstorder_button, group.AABB_button)
    for button in buttons:
        qtbot.mouseClick(button, QtCore.Qt.LeftButton)
        out = capsys.readouterr().out
        assert out == button.objectName() + '\n'
| [
"sametz@udel.edu"
] | sametz@udel.edu |
e357d65a26159b5aadd8455848c3c1cb3a8b19c3 | 82c17d2f12208f77daef47305799800e0fe4df4b | /config/__init__.py | 73b7aa8b919d273bbd5604cee59bf3de67e64061 | [
"MIT"
] | permissive | keremkoseoglu/Guitar-Training-Remote | de59ec0eb68daa7a9a3e09ef6648a0e085587d80 | e4c1ee5a1971ba6b3c09268e25a511425dbd2628 | refs/heads/master | 2023-08-11T06:12:32.351577 | 2023-07-30T06:53:47 | 2023-07-30T06:53:47 | 161,452,837 | 4 | 0 | MIT | 2023-05-23T07:48:03 | 2018-12-12T07:59:22 | Python | UTF-8 | Python | false | false | 1,875 | py | """ Configuration module """
import os
import json
_CONFIGURATION = {}
_DATA_DIR = "data"
def get_configuration() -> dict:
    """ Returns the configuration dict, loading config.json once and
    caching it in the module-level _CONFIGURATION """
    global _CONFIGURATION
    if _CONFIGURATION:
        return _CONFIGURATION
    with open(_get_config_path(), encoding="utf-8") as config_file:
        _CONFIGURATION = json.load(config_file)
    return _CONFIGURATION
def edit_configuration():
    """ Edits the configuration file """
    file_path = _get_config_path()
    # NOTE(review): `open` is the macOS file opener, so this is presumably
    # macOS-only; confirm before relying on it on other platforms.
    os.system(f"open {file_path}")
def get_storage() -> dict:
    """ Reads storage.json from disk and returns its contents """
    with open(_get_storage_path(), encoding="utf-8") as storage_file:
        return json.load(storage_file)
def save_configuration():
    """ Saves configuration to the disk """
    # Persists the module-level _CONFIGURATION cache. Written compactly,
    # unlike save_storage which uses indent=4 -- presumably intentional,
    # verify before changing.
    file_path = _get_config_path()
    with open(file_path, "w", encoding="utf-8") as config_file:
        json.dump(_CONFIGURATION, config_file)
def save_storage(storage: dict):
    """ Writes storage to disk """
    file_path = _get_storage_path()
    # Pretty-printed so storage.json stays human-editable.
    with open(file_path, "w", encoding="utf-8") as storage_file:
        json.dump(storage, storage_file, indent=4)
def get_file_path(file_name: str) -> str:
    """ Returns the absolute path of a file inside the data directory """
    return os.path.join(get_data_dir_path(), file_name)


def get_dir_path(dir_name: str) -> str:
    """ Returns the absolute path of a sub-directory of the data directory """
    return os.path.join(get_data_dir_path(), dir_name)


def get_data_dir_path() -> str:
    """ Returns the absolute path of the data directory (under the CWD) """
    return os.path.join(os.getcwd(), _DATA_DIR)


def _get_config_path() -> str:
    """ Path of the config.json file """
    return get_file_path("config.json")


def _get_storage_path() -> str:
    """ Path of the storage.json file """
    return get_file_path("storage.json")
| [
"kerem@keremkoseoglu.com"
] | kerem@keremkoseoglu.com |
f098d55a0026c91b25d430a40199dce658d65464 | 98d61512fdf7f8426d4634a86edd25669944ab9e | /algorithms/FindLargestValueInEachTreeRow/solution.py | 6aae46e47ca887913b1e9ccac9c7994ba7af492d | [] | no_license | P-ppc/leetcode | 145102804320c6283fa653fc4a7ae89bf745b2fb | 0d90db3f0ca02743ee7d5e959ac7c83cdb435b92 | refs/heads/master | 2021-07-12T02:49:15.369119 | 2018-11-13T05:34:51 | 2018-11-24T12:34:07 | 132,237,265 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 863 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def largestValues(self, root):
        """
        :type root: TreeNode
        :rtype: List[int]

        Breadth-first sweep: take the maximum of each level's node
        values, then descend to that level's children.
        """
        result = []
        level = [root] if root else []
        while level:
            result.append(max(node.val for node in level))
            children = []
            for node in level:
                if node.left:
                    children.append(node.left)
                if node.right:
                    children.append(node.right)
            level = children
        return result
"ppc-user@foxmail.com"
] | ppc-user@foxmail.com |
c2f8b0c653b9b2a632f09ab0bc003b304ab84694 | 89d230ad44d17b18897da507725b0a10c32960d8 | /local/RH/decoratorPython.py | 826d80605a350489ba91431d1add5f47b2ee12a9 | [] | no_license | KB-perByte/CodePedia | aeeae87b56cf0ff6e02200cfd6b34da42a007338 | 287e7a3ce981bbf594436cdc06dde23a02b53bb0 | refs/heads/master | 2021-06-19T07:32:53.849871 | 2021-01-23T16:17:27 | 2021-01-23T16:17:27 | 163,250,017 | 0 | 1 | null | 2020-03-21T14:39:36 | 2018-12-27T05:13:55 | JavaScript | UTF-8 | Python | false | false | 824 | py | def first(func):
print("ABC")
def second():
print("DEF")
return None
return second
@first
def third(): #call a method but don't call it in python
print("GHI")
third()
def run_once(f):
    """Decorator that lets *f* execute only once.

    The first call runs *f* and returns its result; every later call is a
    no-op returning None. The guard flag is exposed as ``wrapper.has_run``.
    Adds ``functools.wraps`` (missing in the original) so the wrapper
    keeps *f*'s name and docstring.
    """
    from functools import wraps

    @wraps(f)
    def wrapper(*args, **kwargs):
        if not wrapper.has_run:
            wrapper.has_run = True
            return f(*args, **kwargs)
    wrapper.has_run = False
    return wrapper
@run_once
def my_function(foo, bar):
    print("ala")
    return foo+bar
my_function(6, 10)  # first call: prints "ala" and returns 16
my_function(6, 10)  # second call: suppressed by run_once, returns None
from datetime import datetime
def not_during_the_night(func):
    # Decorator: only forwards the call between 07:00 and 21:59 local
    # time; outside that window the call is silently skipped. Note the
    # wrapper also discards func's return value.
    def wrapper():
        if 7 <= datetime.now().hour < 22:
            func()
        else:
            pass # Hush, the neighbors are asleep
    return wrapper
def say_whee():
    print("Whee!")
say_whee = not_during_the_night(say_whee)
"paul.sagar@yahoo.com"
] | paul.sagar@yahoo.com |
316dae334bb86a62acfef31a2121449065e52937 | 71460476c5f5ebdca719def124f1a0650861fdab | /mint_work/custom/pos_order_history/models/__init__.py | b2513db7cb67252bf9f66e89af52671ac3d1385f | [] | no_license | merdhah/dubai_work | fc3a70dc0b1db6df19c825a3bf1eef2a373d79c0 | e24eb12b276a4cd5b47a4bd5470d915179872a4f | refs/heads/master | 2022-01-07T11:22:07.628435 | 2018-10-17T13:37:24 | 2018-10-17T13:37:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 297 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# Bista Solutions Pvt. Ltd
# Copyright (C) 2018 (http://www.bistasolutions.com)
#
##############################################################################
import pos_order_history
| [
"asghar0517@gmail.com"
] | asghar0517@gmail.com |
1261b42b0608586081c6164a8301e4a0f49e8b85 | ba80ca143ba35fd481730786a27ebdb1f88ce835 | /algorithm/LC/2.py | 131da9dd69e2ef8227063d26f325ef3b32039361 | [] | no_license | uiandwe/TIL | c541020b65adc53578aeb1c3ba4c6770b3b2e8b3 | 186544469374dd0279099c6c6aa7555ee23e42fe | refs/heads/master | 2022-02-15T08:33:07.270573 | 2022-01-01T15:22:54 | 2022-01-01T15:22:54 | 63,420,931 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 709 | py | # -*- coding: utf-8 -*-
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
        """Add two non-negative integers stored as reversed-digit linked
        lists; return the sum in the same reversed-digit form."""
        dummy = ListNode()
        tail = dummy
        carry = 0
        while l1 or l2 or carry:
            total = carry
            if l1:
                total += l1.val
                l1 = l1.next
            if l2:
                total += l2.val
                l2 = l2.next
            carry, digit = divmod(total, 10)
            tail.next = ListNode(digit)
            tail = tail.next
        return dummy.next
| [
"noreply@github.com"
] | uiandwe.noreply@github.com |
e29d25c8fb1a7a3ea53ba29c314958803eef3a1d | 09e5cfe06e437989a2ccf2aeecb9c73eb998a36c | /modules/dxtbx/format/FormatSMVADSCSNAPSID19.py | 8eb05eeb4108d0e37ae4e95c80019351c35e4ed0 | [
"BSD-3-Clause"
] | permissive | jorgediazjr/dials-dev20191018 | b81b19653624cee39207b7cefb8dfcb2e99b79eb | 77d66c719b5746f37af51ad593e2941ed6fbba17 | refs/heads/master | 2020-08-21T02:48:54.719532 | 2020-01-25T01:41:37 | 2020-01-25T01:41:37 | 216,089,955 | 0 | 1 | BSD-3-Clause | 2020-01-25T01:41:39 | 2019-10-18T19:03:17 | Python | UTF-8 | Python | false | false | 2,814 | py | """
An implementation of the SMV image reader for ADSC images. Inherits from
FormatSMVADSC, customised for example on APS ID19 SN 458 and 914
which have reversed phi.
"""
from __future__ import absolute_import, division, print_function
import sys
from dxtbx.format.FormatSMVADSCSN import FormatSMVADSCSN
class FormatSMVADSCSNAPSID19(FormatSMVADSCSN):
"""A class for reading SMV format ADSC images, and correctly constructing
a model for the experiment from this, for instrument numbers 458 and 914
from the APS ID19 beamline."""
@staticmethod
def understand(image_file):
"""Check to see if this is ADSC SN 458 or 914."""
# check this is detector serial number 458 or 914
size, header = FormatSMVADSCSN.get_smv_header(image_file)
if int(header["DETECTOR_SN"]) not in (458, 914):
return False
return True
def _detector(self):
"""Return a model for a simple detector, presuming no one has
one of these on a two-theta stage. Assert that the beam centre is
provided in the Mosflm coordinate frame."""
distance = float(self._header_dictionary["DISTANCE"])
pixel_size = float(self._header_dictionary["PIXEL_SIZE"])
image_size = (
float(self._header_dictionary["SIZE1"]),
float(self._header_dictionary["SIZE2"]),
)
key = [
s for s in self._header_dictionary if s.endswith("_SPATIAL_BEAM_POSITION")
][0]
beam_x, beam_y = [
float(f) * pixel_size for f in self._header_dictionary[key].split()
]
return self._detector_factory.simple(
"CCD",
distance,
(beam_y, (image_size[1] * pixel_size) - beam_x),
"+x",
"-y",
(pixel_size, pixel_size),
image_size,
self._adsc_trusted_range(),
[],
gain=self._adsc_module_gain(),
)
def _goniometer(self):
"""Return a model for a simple single-axis goniometer. This should
probably be checked against the image header."""
return self._goniometer_factory.single_axis_reverse()
def _scan(self):
"""Return the scan information for this image. There may be
no timestamps in there..."""
format = self._scan_factory.format("SMV")
exposure_time = float(self._header_dictionary["TIME"])
epoch = 0
osc_start = float(self._header_dictionary["OSC_START"])
osc_range = float(self._header_dictionary["OSC_RANGE"])
return self._scan_factory.single(
self._image_file, format, exposure_time, osc_start, osc_range, epoch
)
if __name__ == "__main__":
for arg in sys.argv[1:]:
print(FormatSMVADSCSN.understand(arg))
| [
"jorge7soccer@gmail.com"
] | jorge7soccer@gmail.com |
78b5dedb32fcedb3494d47a5bc59c98dc56ace50 | 2ee4f5aa01ecdacaf66db556b2a289df4dd5e891 | /juriscraper/lib/cookie_utils.py | ecb52db2c63d00c2ca440857926997e4453288d9 | [
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | Alex-Devoid/juriscraper | f9f94f3da1a9fca12a21849922e4898ff0e42564 | 6ac2d5182dbf3aea14d918e645d582e9b42c9dd6 | refs/heads/master | 2020-12-12T01:38:02.511764 | 2020-01-17T07:55:16 | 2020-01-17T07:55:16 | 234,010,973 | 0 | 0 | NOASSERTION | 2020-01-15T06:09:26 | 2020-01-15T06:09:26 | null | UTF-8 | Python | false | false | 665 | py | from requests.cookies import RequestsCookieJar
def normalize_cookies(cookies):
    """Takes cookies from PhantomJS/Selenium or from Python Requests and
    converts them to dict.

    This throws away information that Selenium otherwise has (like the host and
    such), but a dict is essentially all we need.

    Fix: use ``isinstance`` instead of ``type(x) ==`` comparisons, which
    rejected subclasses of list/RequestsCookieJar and are non-idiomatic.
    """
    requests_cookies = {}
    if isinstance(cookies, list):
        # Phantom/Selenium cookies: a list of {'name': ..., 'value': ...} dicts.
        for cookie in cookies:
            requests_cookies[cookie['name']] = cookie['value']
    elif isinstance(cookies, RequestsCookieJar):
        # Requests cookies. Convert to dict.
        requests_cookies = dict(cookies)
    return requests_cookies
| [
"mlissner@michaeljaylissner.com"
] | mlissner@michaeljaylissner.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.