blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
46d2ac900cb2334cf8877854381065b9d8cf5c00 | 670c844e5cfa1cdf11212cc53972ecd8f7a25949 | /python/test/test_LargestBSTSubtree.py | 28e8a1e7c813b055ea87c428c3452719fdf61b41 | [] | no_license | IamConstantine/LeetCodeFiddle | 74d580a0741e40397f1283beadd023f9b9323abd | 5ec509505a394d53517fb005bbeb36745f06596a | refs/heads/master | 2022-05-31T05:25:14.273605 | 2022-05-23T02:46:47 | 2022-05-23T02:46:47 | 100,910,327 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 421 | py | from unittest import TestCase
from LargestBSTSubtree import largestBSTSubtree
from Tree import createBinaryTreeFrom
class Test(TestCase):
def test_largest_bstsubtree(self):
self.assertEqual(3, largestBSTSubtree(createBinaryTreeFrom([10, 5, 15, 1, 8, None, 7])))
self.assertEqual(2, largestBSTSubtree(
createBinaryTreeFrom([4, 2, 7, 2, 3, 5, None, 2, None, None, None, None, None, 1])))
| [
"vishalskumar12@gmail.com"
] | vishalskumar12@gmail.com |
13cc2696c4030efa69cafab66b7e9d4e2898e284 | 09fbeed610a8d85f226ed93cc3acf7433e26a2fa | /python/벽 부수고 이동하기.py | 40a735647dd06095b0fe457714be2eab5451ca14 | [] | no_license | Daboni/PS | 819744ccb1dc1e832a034f5ecf65c0241c3ee4e5 | 6c81cdc0569981db0bce6c540fecf1fb0c5c2805 | refs/heads/master | 2023-08-23T13:42:40.084793 | 2021-10-08T08:29:53 | 2021-10-08T08:29:53 | 290,971,583 | 0 | 0 | null | 2020-12-16T02:37:18 | 2020-08-28T06:42:53 | null | UTF-8 | Python | false | false | 952 | py | from collections import deque
# BOJ 2206 "wall break": shortest path (counting cells) from (0,0) to
# (n-1,m-1) in a 0/1 grid, where at most one wall cell (1) may be broken.
# BFS state is (row, col, z) with z = walls broken so far (0 or 1);
# dist[r][c][z] is the path length, 0 meaning "not visited".
n, m = map(int, input().split())
a = [list(map(int, list(input()))) for _ in range(n)]
dist = [[[0] * 2 for j in range(m)] for i in range(n)]
dx = [0, 0, 1, -1]
dy = [1, -1, 0, 0]
q = deque()
q.append((0, 0, 0))
dist[0][0][0] = 1  # the start cell itself counts as step 1
while q:
    x, y, z = q.popleft()
    for k in range(4):
        nx, ny = x + dx[k], y + dy[k]
        if 0 <= nx < n and 0 <= ny < m:
            # Step onto an open cell, keeping the current wall budget.
            if a[nx][ny] == 0 and dist[nx][ny][z] == 0:
                dist[nx][ny][z] = dist[x][y][z] + 1
                q.append((nx, ny, z))
            # Break this wall (only if none broken yet).  BUGFIX: the
            # visited check must be on layer z+1 — the original tested
            # dist[nx][ny][z], which is never set for wall cells, so
            # every neighbouring z=0 state re-enqueued the wall and
            # overwrote its distance with a larger value.
            if z == 0 and a[nx][ny] == 1 and dist[nx][ny][z + 1] == 0:
                dist[nx][ny][z + 1] = dist[x][y][z] + 1
                q.append((nx, ny, z + 1))
# Report the best of the two layers that actually reached the goal.
if dist[n-1][m-1][0] != 0 and dist[n-1][m-1][1] != 0:
    print(min(dist[n-1][m-1]))
elif dist[n-1][m-1][0] != 0:
    print(dist[n-1][m-1][0])
elif dist[n-1][m-1][1] != 0:
    print(dist[n-1][m-1][1])
else:
    print(-1)
| [
"noreply@github.com"
] | Daboni.noreply@github.com |
1218581a5ee1a3e8e642802190640ef51822c97b | 868f5c1da9a0be1f4b477c99a77753f53b1ce094 | /Password-Strength-Analysis-master/DecisionTree.py | 9acb7c1fd7a40d165f040dfc3362721048867ec5 | [] | no_license | vaibhavkrishna-bhosle/Data-Science-Projects-with-Python | 9dd078656472dd1c19b21e16690e5c85e3d5c439 | 6ad88a96e2ce4a0fe3a8ff86f3922240e1a0c574 | refs/heads/main | 2023-05-07T04:57:55.868559 | 2021-06-05T16:01:41 | 2021-06-05T16:01:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,047 | py | # Import the necessary Libraries
import pandas as pd
# For text feature extraction
from sklearn.feature_extraction.text import TfidfVectorizer
# For creating a pipeline
from sklearn.pipeline import Pipeline
# Classifier Model (Decision Tree)
from sklearn.tree import DecisionTreeClassifier
# To save the trained model on local storage
# NOTE(review): `sklearn.externals.joblib` is deprecated and removed in
# scikit-learn >= 0.23 — should become `import joblib` when touched.
from sklearn.externals import joblib
# Read the File
# Assumes training.csv has the password in column 1 and the strength
# label in the last column — TODO confirm against the dataset.
data = pd.read_csv('training.csv')
# Features which are passwords
features = data.values[:, 1].astype('str')
# Labels which are strength of password
labels = data.values[:, -1].astype('int')
# Sequentially apply a list of transforms and a final estimator:
# char-level TF-IDF vectoriser feeding a decision tree.
classifier_model = Pipeline([
    ('tfidf', TfidfVectorizer(analyzer='char')),
    ('decisionTree',DecisionTreeClassifier()),
])
# Fit the Model
classifier_model.fit(features, labels)
# Training Accuracy (on the training set itself, so optimistic).
print('Training Accuracy: ',classifier_model.score(features, labels))
joblib.dump(classifier_model, 'DecisionTree_Model.joblib') | [
"vaibhavkrishna.bhosle@gmail.com"
] | vaibhavkrishna.bhosle@gmail.com |
e563875ae64eed13fbc5d0f64d5615e2ab320060 | 89c771acabae2ba0b3b95c8817d696cf4a24413d | /znop/exceptions.py | 5f3188b0f2c186ca15cbf22db450aa9d07e70313 | [
"MIT"
] | permissive | iann838/Znop | 111f0d9ae68959d7d5d7856f0397165b0373ce1a | 631a14a7a24f32965e497755003eb92ded5c48ca | refs/heads/main | 2023-06-02T20:48:56.763554 | 2020-10-26T01:36:47 | 2020-10-26T01:36:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 505 | py |
class ZSetError(Exception):
    """Raised for an operation mixing ZnInt values from different Z sets."""

    def __init__(self):
        # super() is the idiomatic spelling of Exception.__init__(self, ...)
        super().__init__("Operation between ZnInt of different Z set")


class ZVarError(Exception):
    """Raised for an operation mixing ZnInt variables outside products."""

    def __init__(self):
        super().__init__("Operation between ZnInt of different variables outside products")


class ParseError(Exception):
    """Raised when an expression/equation string cannot be parsed.

    :param msg: human-readable description of the parse failure.
    """

    def __init__(self, msg):
        super().__init__(msg)


class ResolveError(Exception):
    """Raised when an equation cannot be resolved."""

    def __init__(self):
        super().__init__("Could not resolve equation")
| [
"paaksingtech@gmail.com"
] | paaksingtech@gmail.com |
7df396efb2568567394b125d940a7fd300c7ffcb | 2c74bb301f1ed83b79254944183ac5a18a639fdf | /homeassistant/components/flux_led/util.py | 6bcf4538dfb80c5123e0a2c5f53110201feba273 | [
"Apache-2.0"
] | permissive | Adminiuga/home-assistant | 5bec93007ddac1a268cc359bf7e48530c5f73b38 | dcf68d768e4f628d038f1fdd6e40bad713fbc222 | refs/heads/dev | 2023-02-22T22:03:31.013931 | 2022-11-09T00:27:20 | 2022-11-09T00:27:20 | 123,929,062 | 5 | 4 | Apache-2.0 | 2023-02-22T06:14:31 | 2018-03-05T14:11:09 | Python | UTF-8 | Python | false | false | 4,254 | py | """Utils for Magic Home."""
from __future__ import annotations
from flux_led.aio import AIOWifiLedBulb
from flux_led.const import COLOR_MODE_DIM as FLUX_COLOR_MODE_DIM, MultiColorEffects
from homeassistant.components.light import ColorMode
from homeassistant.util.color import color_hsv_to_RGB, color_RGB_to_hsv
from .const import FLUX_COLOR_MODE_TO_HASS, MIN_RGB_BRIGHTNESS
def _hass_color_modes(device: AIOWifiLedBulb) -> set[str]:
    """Return the Home Assistant color modes supported by *device*.

    Every flux_led mode reported by the bulb is mapped through
    _flux_color_mode_to_hass; the full mode set is passed along because
    the translation of the DIM mode depends on which other modes exist.
    """
    color_modes = device.color_modes
    return {_flux_color_mode_to_hass(mode, color_modes) for mode in color_modes}
def format_as_flux_mac(mac: str | None) -> str | None:
    """Convert a device registry formatted mac to flux mac."""
    if mac is None:
        return None
    return mac.replace(":", "").upper()
def _human_readable_option(const_option: str) -> str:
return const_option.replace("_", " ").title()
def mac_matches_by_one(formatted_mac_1: str, formatted_mac_2: str) -> bool:
    """Check if a mac address is only one digit off.

    Some of the devices have two mac addresses which are
    one off from each other. We need to treat them as the same
    since its the same device.
    """
    first, second = (
        int(mac.replace(":", ""), 16)
        for mac in (formatted_mac_1, formatted_mac_2)
    )
    return abs(first - second) < 2
def _flux_color_mode_to_hass(
    flux_color_mode: str | None, flux_color_modes: set[str]
) -> ColorMode:
    """Map the flux color mode to Home Assistant color mode."""
    # No reported mode: the device can only be switched on/off.
    if flux_color_mode is None:
        return ColorMode.ONOFF
    if flux_color_mode == FLUX_COLOR_MODE_DIM:
        # DIM alongside other modes becomes WHITE (a white channel on a
        # color-capable light); DIM alone is a brightness-only light.
        if len(flux_color_modes) > 1:
            return ColorMode.WHITE
        return ColorMode.BRIGHTNESS
    # Table lookup for the remaining modes; unknown values degrade to ONOFF.
    return FLUX_COLOR_MODE_TO_HASS.get(flux_color_mode, ColorMode.ONOFF)
def _effect_brightness(brightness: int) -> int:
"""Convert hass brightness to effect brightness."""
return round(brightness / 255 * 100)
def _str_to_multi_color_effect(effect_str: str) -> MultiColorEffects:
    """Convert an multicolor effect string to MultiColorEffects."""
    # Match the lowercased enum member name (e.g. "static" -> STATIC).
    for effect in MultiColorEffects:
        if effect.name.lower() == effect_str:
            return effect
    # unreachable due to schema validation
    assert False  # pragma: no cover
def _is_zero_rgb_brightness(rgb: tuple[int, int, int]) -> bool:
"""RGB brightness is zero."""
return all(byte == 0 for byte in rgb)
def _min_rgb_brightness(rgb: tuple[int, int, int]) -> tuple[int, int, int]:
    """Ensure the RGB value will not turn off the device from a turn on command."""
    if not _is_zero_rgb_brightness(rgb):
        return rgb
    # All channels zero would power the light off — clamp to the minimum.
    return (MIN_RGB_BRIGHTNESS,) * 3
def _min_scaled_rgb_brightness(rgb: tuple[int, int, int]) -> tuple[int, int, int]:
    """Scale an RGB tuple to minimum brightness."""
    # Round-trip through HSV keeping hue/saturation and forcing the value
    # component to 1 (HA's HSV value appears to be on a 0-100 scale, so
    # ~1% brightness — TODO confirm against homeassistant.util.color).
    return color_hsv_to_RGB(*color_RGB_to_hsv(*rgb)[:2], 1)
def _min_rgbw_brightness(
    rgbw: tuple[int, int, int, int], current_rgbw: tuple[int, int, int, int]
) -> tuple[int, int, int, int]:
    """Ensure the RGBW value will not turn off the device from a turn on command.

    For RGBW, we also need to ensure that there is at least one
    value in the RGB fields or the device will switch to CCT mode unexpectedly.

    If the new value being set is all zeros, scale the current
    color to brightness of 1 so we do not unexpectedly switch to white.
    """
    if _is_zero_rgb_brightness(rgbw[:3]):
        # RGB channels all zero: keep the device's current hue at minimum
        # brightness, preserving the requested white channel.
        return (*_min_scaled_rgb_brightness(current_rgbw[:3]), rgbw[3])
    return (*_min_rgb_brightness(rgbw[:3]), rgbw[3])
def _min_rgbwc_brightness(
    rgbwc: tuple[int, int, int, int, int], current_rgbwc: tuple[int, int, int, int, int]
) -> tuple[int, int, int, int, int]:
    """Ensure the RGBWC value will not turn off the device from a turn on command.

    For RGBWC, we also need to ensure that there is at least one
    value in the RGB fields or the device will switch to CCT mode unexpectedly.

    If the new value being set is all zeros, scale the current
    color to brightness of 1 so we do not unexpectedly switch to white.
    """
    if _is_zero_rgb_brightness(rgbwc[:3]):
        # RGB channels all zero: keep the current hue at minimum brightness,
        # preserving the requested warm/cold white channels.
        return (*_min_scaled_rgb_brightness(current_rgbwc[:3]), rgbwc[3], rgbwc[4])
    return (*_min_rgb_brightness(rgbwc[:3]), rgbwc[3], rgbwc[4])
| [
"noreply@github.com"
] | Adminiuga.noreply@github.com |
39ee5932bfd557ee2fe4e3dabcad17dfc8e81e82 | f1b8f0a0c44fbdb9d270415f6b4edb1672a234dd | /2strings_follows.py | 623522a78ce36a09add7390e5cc331f36d34b2d9 | [] | no_license | devika4365/programs_i_done | 2df10890fb55e893eb572aab6cf09e673a0438ef | e09f6ac6c0f1281b7ee9c21a5d1faff999a7a8ec | refs/heads/main | 2023-04-06T02:45:04.492005 | 2021-04-07T15:59:31 | 2021-04-07T15:59:31 | 355,602,355 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 624 | py | l=input()
d=input()
k=list(d)
j=len(l)-len(d)
y=''
m=0
for w in d:
if w=='*':
q=k.index('*')
k.pop(q)
for h in range(j+1):
k.insert(q,'_')
for t in k:
y+=t
print(y)
i=0
while i<len(y) :
if l[i]=="_":
i+=1
continue
elif y[i]=='_':
i+=1
if i<len(l) and i<len(y):
continue
else:
m=1
break
elif l[i]!=y[i]:
m=1
break
i+=1
if m==0:
print("follows")
else:
print("not follows")
| [
"noreply@github.com"
] | devika4365.noreply@github.com |
bef2a15595565d4e10e06de1032ca6f65de17582 | e43623d3819022510f5a41af57cfd4590d336804 | /boxuegu/apps/courses/migrations/0009_auto_20190612_1218.py | 5eca3a32861b6ad0a644919fe4debd913a44436d | [
"MIT"
] | permissive | libin-c/bxg | a1a299ac55c02b73fbffb131e464774a926fdae2 | c509a5b39bc3f3f34ad9d7fbfb61a63d2f67bc23 | refs/heads/master | 2020-06-03T06:11:35.685109 | 2019-06-13T14:27:02 | 2019-06-13T14:27:02 | 191,474,660 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 595 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2019-06-12 12:18
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('courses', '0008_bannercourse'),
]
operations = [
migrations.AlterField(
model_name='course',
name='course_org',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='organization.CourseOrg', verbose_name='课程学校'),
),
]
| [
"84045407@qq.com"
] | 84045407@qq.com |
3a68ef2ebde3de399b39f874dbd2567c72d8d4b4 | 23f4ae6dd155449eb6ad2da6fa09209ca8e459d0 | /gen/__init__.py | 78c757bfa0ae31fe586e28a998dcaa7b0797a0bd | [] | no_license | pombreda/gen | 381267d9f61652f0056fecbb4bc223b630bc2480 | 0372b0395f224c63ab5a69c3829a329512efe1fb | refs/heads/master | 2021-01-20T10:53:39.171500 | 2015-04-08T13:14:17 | 2015-04-08T13:14:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,141 | py | import os
import click
from . import generate, loader, parser, cli
__version__ = '0.2.dev1'
class ExtendedCommand(click.Command):
    """Click command that lets unrecognised CLI options pass through.

    Extra/unknown arguments end up on ``ctx.args`` instead of raising a
    usage error, so ``main`` can forward them to the template-specific
    variable parser built from the YAML spec.
    """

    allow_extra_args = True
    # Stop option parsing at the first positional argument.
    allow_interspersed_args=False
    ignore_unknown_options = True
@click.command(cls=ExtendedCommand)
@click.argument('template')
@click.option('--target', '-d', default='.',
              help="Render the template into this target directory (default: cwd)")
@click.version_option(version=__version__)
@click.pass_context
def main(ctx, template, target):
    "Generate files from TEMPLATE"
    # TEMPLATE is either a single YAML spec file or a template directory.
    if os.path.isfile(template):
        # YAML spec: declares both the files to render and the variables
        # the user may set on the command line.
        files, variables = parser.parse_yaml(template)
        # Deferred so the files are only rendered once variables are parsed.
        def callback(**kw):
            for f in files:
                generate.build(f, target=target, variables=kw)
        cmd = cli.variable_parser(variables, callback)
        # ctx.args holds the extra options ExtendedCommand let through.
        cmd(ctx.args)
    else:
        # Directory template: render every file the loader discovers.
        l = loader.FilesystemLoader(template)
        for name in l.list_templates():
            spec = {
                'name': name,
                'content': l.get_source(name),
                'executable': l.get_executable(name),
            }
            generate.build(spec, target=target)
| [
"p.f.moore@gmail.com"
] | p.f.moore@gmail.com |
3881c0fb7c6bdcdc9783303f654cb06ee895f6ca | a8be4698c0a43edc3622837fbe2a98e92680f48a | /SSAFY알고리즘정규시간 Problem Solving/9월 Problem Solving/0924실습/5208전기버스2.py | 87e8497006ec10493cbf4fec40d07c68dd7edb2b | [] | no_license | blueboy1593/algorithm | fa8064241f7738a12b33544413c299e7c1e1a908 | 9d6fdd82b711ba16ad613edcc041cbecadd85e2d | refs/heads/master | 2021-06-23T22:44:06.120932 | 2021-02-21T10:44:16 | 2021-02-21T10:44:16 | 199,543,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 701 | py | import sys
sys.stdin = open("5208_input.txt", "r")
def backtrack(i, cnt):
global min_cnt
dis_possible = lpg_list[i]
if i + dis_possible >= N:
if cnt < min_cnt:
min_cnt = cnt
return
else:
cnt += 1
if cnt > min_cnt:
return
for j in range(1, dis_possible + 1):
i = i + j
if i < N:
backtrack(i, cnt)
i = i - j
T = int(input())
for tc in range(1, T + 1):
lpg_list = list(map(int, input().split()))
N = lpg_list[0]
min_cnt = 999
cnt = 1
gas = lpg_list[1]
for k in range(1, gas + 1):
backtrack(1 + k, cnt)
print("#%d %d" %(tc,min_cnt)) | [
"snb0303@naver.com"
] | snb0303@naver.com |
38efb1ee8f2621f1eb87d724df7b5e7212dbf672 | cd2c85500d420a67c433113cf43a734669134423 | /build/v4r_ros_wrappers/segmentation_srv_definitions/catkin_generated/pkg.develspace.context.pc.py | 4e74f46d8002238624505649ab8fea71c18f305c | [] | no_license | 0000duck/youbot_mobile_manipulation_WS | f942974724dd19c9c92e852ccbd056e29d9c6049 | 0e966211c8d7135dc7cffedbb10b15459398ef8f | refs/heads/master | 2020-12-10T11:59:30.700737 | 2017-07-17T13:49:12 | 2017-07-17T13:49:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 453 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/ros/catkin_ws/devel/include".split(';') if "/home/ros/catkin_ws/devel/include" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "segmentation_srv_definitions"
PROJECT_SPACE_DIR = "/home/ros/catkin_ws/devel"
PROJECT_VERSION = "0.1.4"
| [
"mohdnaveed96@gmail.com"
] | mohdnaveed96@gmail.com |
59868bde98baef6553bc297d6e3a9a5961b17e57 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_exercises/templates/_algorithms_challenges/leetcode/LeetCode_with_solution/849 Maximize Distance to Closest Person.py | b6e54714a63014072b31a7d7edc3ed3900de8b9e | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 2,333 | py | #!/usr/bin/python3
"""
In a row of seats, 1 represents a person sitting in that seat, and 0 represents
that the seat is empty.
There is at least one empty seat, and at least one person sitting.
Alex wants to sit in the seat such that the distance between him and the closest
person to him is maximized.
Return that maximum distance to closest person.
Example 1:
Input: [1,0,0,0,1,0,1]
Output: 2
Explanation:
If Alex sits in the second open seat (seats[2]), then the closest person has
distance 2.
If Alex sits in any other open seat, the closest person has distance 1.
Thus, the maximum distance to the closest person is 2.
Example 2:
Input: [1,0,0,0]
Output: 3
Explanation:
If Alex sits in the last seat, the closest person is 3 seats away.
This is the maximum distance possible, so the answer is 3.
Note:
1 <= seats.length <= 20000
seats contains only 0s or 1s, at least one 0, and at least one 1.
"""
____ t___ _______ L..
c_ Solution:
___ maxDistToClosest seats: L.. i.. __ i..
"""
DP from left and right - next array
Let L[i] be the distant to the left 1 at A[i]
Let R[i] ...
"""
n l..(seats)
L [f__("inf") ___ _ __ r..(n)]
R [f__("inf") ___ _ __ r..(n)]
___ i __ r..(n
__ seats[i] __ 1:
L[i] 0
____ i - 1 >_ 0:
L[i] L[i-1] + 1
___ i __ r..(n-1, -1 , -1
__ seats[i] __ 1:
R[i] 0
____ i + 1 < n:
R[i] R[i+1] + 1
r.. m..(
m..(L[i], R[i])
___ i __ r..(n)
)
___ maxDistToClosest2 seats: L.. i.. __ i..
"""
maintain a sorrted index array
"""
idxes # list
___ i, e __ e..(seats
__ e __ 1:
idxes.a..(i)
ret [-f__("inf"), 0]
n l..(seats)
# two ends
___ i, j __ z..((0, n-1), (0, -1:
dist a..(i - idxes[j])
__ dist > ret[0]:
ret [dist, i]
___ j __ r..(l..(idxes) - 1
i (idxes[j] + idxes[j+1]) // 2
dist m..(a..(i - idxes[j]), a..(i - idxes[j+1]
__ dist > ret[0]:
ret [dist, i]
r.. ret[0]
__ _______ __ _______
... Solution().maxDistToClosest([1,0,0,0,1,0,1]) __ 2
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
f2fdf829d5ea58bda66bc286d53f322c80596ce0 | 66153b1864abee82006e060438887862ae9577c5 | /L4/L4_7.py | 3ae4d3d988bf7def949804ac0d9115a34790dd24 | [] | no_license | ArvinZJC/BUAAHND_G2T2_Python | d05f3399b13394252f2a187f9915f33bdb03b5f9 | 1f5ad1a894559717c13cae6c683a3a984ee05ad6 | refs/heads/master | 2021-06-03T13:19:58.120021 | 2020-02-18T20:20:35 | 2020-02-18T20:20:35 | 147,664,826 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 215 | py | # 第5章.pptx, P40, program that imports module math
import math
print(math.e)
print(math.pi)
print(math.ceil(3.4))
print(math.fabs(-3))
print(math.floor(3.4))
print(math.sqrt(4))
print(math.trunc(3.4)) | [
"tomzjc@qq.com"
] | tomzjc@qq.com |
db449c1334babcf141a25c05499799d80ad717ef | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02925/s102175948.py | 8626594420d162d567dd933824c89bb48507f45f | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | from collections import deque
# Greedy match scheduling: each of the n players has an ordered list of
# opponents they must play in that exact order; find the minimum number
# of days (or -1 if impossible).  Looks like AtCoder ABC139 E — confirm.
n=int(input())
a=[list(map(int,input().split())) for i in range(n)]
for i in range(n):
    # Convert opponents to 0-based indices and reverse so the next
    # required opponent can be taken with an O(1) pop() from the end.
    for j in range(n-1):
        a[i][j]-=1
    a[i].reverse()
# day[i]: last day player i played; pair[i]: opponent i is waiting for.
day=[0]*n
pair=[-1]*n
q=deque(range(n))
while q:
    me=q.popleft()
    if not a[me]:
        continue
    oppo=a[me].pop()
    if pair[oppo]==me:
        # Both players want each other next: schedule the match one day
        # after the later of their previous matches, then re-queue both.
        now=max(day[me],day[oppo])+1
        day[me]=now
        day[oppo]=now
        q.append(me)
        q.append(oppo)
    else:
        # Register that `me` is now waiting for `oppo`.
        pair[me]=oppo
# Any leftover opponent list means a deadlock: no valid schedule.
for i in range(n):
    if a[i]:
        print(-1)
        break
else:
    print(max(day))
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
d2495c2093ddf4a8ee6ec28e4ad2bac359359ed0 | 60b5a9a8b519cb773aca004b7217637f8a1a0526 | /inspection/urls.py | 2c70bca05de76c783a1ed7e5085bb4b8f919a278 | [] | no_license | malep2007/dag-bragan-erp-backend | 76ce90c408b21b0bda73c6dd972e2f77b7f21b1f | e98182af2848a6533ddd28c586649a8fee1dc695 | refs/heads/master | 2021-08-11T01:29:27.864747 | 2019-01-15T17:46:26 | 2019-01-15T17:46:26 | 151,831,965 | 0 | 0 | null | 2021-06-10T20:56:21 | 2018-10-06T11:10:12 | Python | UTF-8 | Python | false | false | 441 | py | from django.urls import path
from . import views
# CRUD routes for Property inspections.
urlpatterns = [
    path('', views.PropertyDetailList.as_view(), name='index'),
    path('detail/<int:pk>', views.PropertyDetailView.as_view(), name='detail'),
    # NOTE(review): trailing-slash usage is inconsistent ('edit/.../' and
    # 'create/' end with '/', 'detail/...' and 'delete/...' do not) —
    # consider normalising so APPEND_SLASH behaves uniformly.
    path('edit/<int:pk>/', views.PropertyUpdateView.as_view(), name='edit'),
    path('delete/<int:pk>', views.PropertyDeleteView.as_view(), name='delete'),
    path('create/', views.PropertyCreateView.as_view(), name='create'),
]
| [
"ephraim.malinga@gmail.com"
] | ephraim.malinga@gmail.com |
9a9a42ee81a71f98f4e379cea9c7726b49a5df3a | fab14fae2b494068aa793901d76464afb965df7e | /benchmarks/f3_wrong_hints/scaling_software_termination/11-2Nested_false-termination_23.py | d2168de86996467b1613aa8d401f0ed5c1e2f819 | [
"MIT"
] | permissive | teodorov/F3 | 673f6f9ccc25acdfdecbfc180f439253474ba250 | c863215c318d7d5f258eb9be38c6962cf6863b52 | refs/heads/master | 2023-08-04T17:37:38.771863 | 2021-09-16T07:38:28 | 2021-09-16T07:38:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,414 | py | from typing import Tuple, FrozenSet
from pysmt.environment import Environment as PysmtEnv
from pysmt.fnode import FNode
import pysmt.typing as types
from utils import symb_to_next
from hint import Hint, Location
def transition_system(env: PysmtEnv) -> Tuple[FrozenSet[FNode], FNode, FNode,
                                              FNode]:
    """Build the symbolic transition system of the benchmark program.

    State variables are pc (control location, -1 = exit sink), x and y.
    Returns (symbols, init, trans, fairness): the symbol set, the
    initial predicate (pc = 0), the transition relation (control-flow
    constraints conjoined with per-location data updates), and the
    fairness condition pc != -1.
    """
    assert isinstance(env, PysmtEnv)
    mgr = env.formula_manager
    pc = mgr.Symbol("pc", types.INT)
    x = mgr.Symbol("x", types.INT)
    y = mgr.Symbol("y", types.INT)
    # Primed (next-state) copies of the state variables.
    x_pc = symb_to_next(mgr, pc)
    x_x = symb_to_next(mgr, x)
    x_y = symb_to_next(mgr, y)
    symbols = frozenset([pc, x, y])
    m_1 = mgr.Int(-1)
    n_locs = 3
    max_int = n_locs
    ints = []
    pcs = []
    x_pcs = []
    # Integer constants plus pc = idx / pc' = idx predicates per location.
    for idx in range(n_locs):
        num = mgr.Int(idx)
        ints.append(num)
        pcs.append(mgr.Equals(pc, num))
        x_pcs.append(mgr.Equals(x_pc, num))
    # No-op here because max_int == n_locs.
    for idx in range(n_locs, max_int):
        num = mgr.Int(idx)
        ints.append(num)
    pcend = mgr.Equals(pc, m_1)
    x_pcend = mgr.Equals(x_pc, m_1)
    init = pcs[0]
    cfg = []
    # pc = 0 & (x >= 0) -> pc' = 1
    cond = mgr.GE(x, ints[0])
    cfg.append(mgr.Implies(mgr.And(pcs[0], cond), x_pcs[1]))
    # pc = 0 & !(x >= 0) -> pc' = -1
    cfg.append(mgr.Implies(mgr.And(pcs[0], mgr.Not(cond)), x_pcend))
    # pc = 1 -> pc' = 2
    cfg.append(mgr.Implies(pcs[1], x_pcs[2]))
    # pc = 2 -> pc' = 0
    cfg.append(mgr.Implies(pcs[2], x_pcs[0]))
    # pc = -1 -> pc' = -1
    cfg.append(mgr.Implies(pcend, x_pcend))
    trans = []
    # Frame conditions for the data variables.
    same_x = mgr.Equals(x_x, x)
    same_y = mgr.Equals(x_y, y)
    same = mgr.And(same_x, same_y)
    # pc = 0 -> same
    trans.append(mgr.Implies(pcs[0], same))
    # pc = 1 -> x' = x + y & same_y
    trans.append(mgr.Implies(pcs[1],
                             mgr.And(mgr.Equals(x_x, mgr.Plus(x, y)),
                                     same_y)))
    # pc = 2 -> same_x & y' = y + 1
    trans.append(mgr.Implies(pcs[2],
                             mgr.And(same_x,
                                     mgr.Equals(x_y, mgr.Plus(y, ints[1])))))
    # pc = end -> same
    trans.append(mgr.Implies(pcend, same))
    trans = mgr.And(*cfg, *trans)
    fairness = mgr.Not(mgr.Equals(pc, m_1))
    return symbols, init, trans, fairness
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
    """Return the set of candidate Hint automata for this benchmark.

    Each Hint ranges over a subset of {pc, x, y} and is a list of
    Location objects; a Location carries one or two region predicates
    (see hint.Location), and loc.set_progress(dst, pred) fixes the
    successor location index together with a predicate over the primed
    variables.
    """
    assert isinstance(env, PysmtEnv)
    mgr = env.formula_manager
    pc = mgr.Symbol("pc", types.INT)
    x = mgr.Symbol("x", types.INT)
    y = mgr.Symbol("y", types.INT)
    symbs = frozenset([pc, x, y])
    # Frequently used integer constants.
    m_100 = mgr.Int(-100)
    m_1 = mgr.Int(-1)
    i_0 = mgr.Int(0)
    i_1 = mgr.Int(1)
    i_2 = mgr.Int(2)
    i_4 = mgr.Int(4)
    i_20 = mgr.Int(20)
    # Primed (next-state) copies of the symbols.
    x_pc = symb_to_next(mgr, pc)
    x_x = symb_to_next(mgr, x)
    x_y = symb_to_next(mgr, y)
    res = []
    # h_y2: two-location loop on y; y' = x + y, then y reset to -100.
    loc0 = Location(env, mgr.GE(y, m_100), mgr.LE(x, i_20))
    loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(x, y)))
    loc1 = Location(env, mgr.TRUE(), mgr.GE(x, m_100))
    loc1.set_progress(0, mgr.Equals(x_y, m_100))
    h_y = Hint("h_y2", env, frozenset([y]), symbs)
    h_y.set_locs([loc0, loc1])
    res.append(h_y)
    # h_x2: x grows by y, then is reset to y.
    loc0 = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1))
    loc0.set_progress(1, mgr.Equals(x_x, mgr.Plus(x, y)))
    loc1 = Location(env, mgr.GE(x, i_2), mgr.GE(y, i_1))
    loc1.set_progress(0, mgr.Equals(x_x, y))
    h_x = Hint("h_x2", env, frozenset([x]), symbs)
    h_x.set_locs([loc0, loc1])
    res.append(h_x)
    # h_y3: like h_y2 but with a multiplicative update y' = x * y.
    loc0 = Location(env, mgr.GE(y, m_100), mgr.LE(x, i_20))
    loc0.set_progress(1, mgr.Equals(x_y, mgr.Times(x, y)))
    loc1 = Location(env, mgr.TRUE(), mgr.GE(x, m_100))
    loc1.set_progress(0, mgr.Equals(x_y, m_100))
    h_y = Hint("h_y3", env, frozenset([y]), symbs)
    h_y.set_locs([loc0, loc1])
    res.append(h_y)
    # h_x3: multiplicative growth of x, then reset to y.
    loc0 = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1))
    loc0.set_progress(1, mgr.Equals(x_x, mgr.Times(x, y)))
    loc1 = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1))
    loc1.set_progress(0, mgr.Equals(x_x, y))
    h_x = Hint("h_x3", env, frozenset([x]), symbs)
    h_x.set_locs([loc0, loc1])
    res.append(h_x)
    # h_pc0: pc jumps above pc+1, then back to 1.
    loc0 = Location(env, mgr.Equals(pc, i_1))
    loc0.set_progress(1, mgr.GT(x_pc, mgr.Plus(pc, i_1)))
    loc1 = Location(env, mgr.GT(pc, i_2))
    loc1.set_progress(0, mgr.Equals(x_pc, i_1))
    h_pc = Hint("h_pc0", env, frozenset([pc]), symbs)
    h_pc.set_locs([loc0, loc1])
    res.append(h_pc)
    # h_y4: three-location variant of h_y3 with an extra clamp step.
    loc0 = Location(env, mgr.GE(y, m_100), mgr.LE(x, i_20))
    loc0.set_progress(1, mgr.Equals(x_y, mgr.Times(x, y)))
    loc1 = Location(env, mgr.TRUE(), mgr.GE(x, m_100))
    loc1.set_progress(2, mgr.GE(x_y, i_20))
    loc2 = Location(env, mgr.TRUE())
    loc2.set_progress(0, mgr.And(mgr.GE(x_y, m_100), mgr.LE(x_y, i_0)))
    h_y = Hint("h_y4", env, frozenset([y]), symbs)
    h_y.set_locs([loc0, loc1, loc2])
    res.append(h_y)
    # h_pc1: trivial unconstrained self-loop on pc.
    loc0 = Location(env, mgr.TRUE())
    loc0.set_progress(0, mgr.TRUE())
    h_pc = Hint("h_pc1", env, frozenset([pc]), symbs)
    h_pc.set_locs([loc0])
    res.append(h_pc)
    # h_x5: x alternates sign: x' = x^2 (>= 0), then x' < -x^2 (< 0).
    loc0 = Location(env, mgr.LE(x, i_0))
    loc0.set_progress(1, mgr.Equals(x_x, mgr.Times(x, x)))
    loc1 = Location(env, mgr.GE(x, i_0))
    loc1.set_progress(0, mgr.LT(x_x, mgr.Times(m_1, x, x)))
    h_x = Hint("h_x5", env, frozenset([x]), symbs)
    h_x.set_locs([loc0, loc1])
    res.append(h_x)
    # h_y6: y squared, then increased by at least 1.
    loc0 = Location(env, mgr.GE(y, m_100))
    loc0.set_progress(1, mgr.Equals(x_y, mgr.Times(y, y)))
    loc1 = Location(env, mgr.GE(y, i_0))
    loc1.set_progress(0, mgr.GE(x_y, mgr.Plus(y, i_1)))
    h_y = Hint("h_y6", env, frozenset([y]), symbs)
    h_y.set_locs([loc0, loc1])
    res.append(h_y)
    # h_pc3: pc strictly increases, then is set to pc/pc (1 when pc != 0).
    loc0 = Location(env, mgr.LE(pc, i_1))
    loc0.set_progress(1, mgr.GT(x_pc, pc))
    loc1 = Location(env, mgr.LE(pc, i_2))
    loc1.set_progress(0, mgr.Equals(x_pc, mgr.Div(pc, pc)))
    h_pc = Hint("h_pc3", env, frozenset([pc]), symbs)
    h_pc.set_locs([loc0, loc1])
    res.append(h_pc)
    # h_x7: three-location hint mixing growth (x*y + 1) and sign flips.
    loc0 = Location(env, mgr.GE(x, i_1), mgr.GT(y, i_1))
    loc0.set_progress(1, mgr.Equals(x_x, mgr.Plus(mgr.Times(x, y), i_1)))
    loc1 = Location(env, mgr.GE(x, i_2))
    loc1.set_progress(2, mgr.LT(x_x, mgr.Times(m_1, x, x)))
    loc2 = Location(env, mgr.LE(x, i_4))
    loc2.set_progress(0, mgr.GE(x_x, mgr.Div(x, x)))
    h_x = Hint("h_x7", env, frozenset([x]), symbs)
    h_x.set_locs([loc0, loc1, loc2])
    res.append(h_x)
    return frozenset(res)
| [
"en.magnago@gmail.com"
] | en.magnago@gmail.com |
c6763b313c55dfbb26de2d027ecc5df9716cc657 | f34d3948b707e461151ee33296a61fb23a6d3f44 | /month01/day10/day10/exercise03.py | 230ff438aaca319979f8254df0dcf1da291c2398 | [] | no_license | xiao-a-jian/python-study | f9c4e3ee7a2f9ae83bec6afa7c7b5434e8243ed8 | c8e8071277bcea8463bf6f2e8cd9e30ae0f1ddf3 | refs/heads/master | 2022-06-09T17:44:41.804228 | 2020-05-05T07:48:07 | 2020-05-05T07:48:07 | 256,927,969 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,232 | py | # 练习1:创建手机类,实例化2个对象
class MobilePhone:
    """
    Abstract mobile phone class (teaching exercise).
    """
    # Data: brand, price, color
    def __init__(self, brand, price, color):
        """
        Create a new phone object.
        :param brand: brand name
        :param price: unit price
        :param color: color
        """
        self.brand = brand
        self.price = price
        self.color = color
    # Behaviors: take a picture, crack walnuts
    def take_picture(self):
        """
        Print "<brand> 拍照" (take a picture).
        :return: None
        """
        print(self.brand, "拍照")
    def smash_nut(self):
        """
        Print "<brand> 砸核桃" (crack a walnut).
        :return: None
        """
        print(self.brand, "砸核桃")
# 2. Instantiate concrete phone objects.
HW = MobilePhone("华为P30", 5000, "green")
HW.take_picture()
iphone = MobilePhone("苹果", 5000, "白色")
iphone.take_picture()
# Exercise 2: draw the memory diagram for the following code.
mp01 = HW
HW.price = 6000
print(mp01.price)  # 6000 — mp01 and HW reference the same object
mp02 = iphone
iphone = MobilePhone("苹果",10000,"白色")
print(mp02.price)  # 5000 — mp02 still references the original object
# Exercise 3: draw the memory diagram for the following code.
list01 = [
    mp01,
    mp02,
    MobilePhone("三星", 4000, "蓝色")
]
list01[0].color = "红色"
# Slicing copies the list, but both lists share the same phone objects.
list02 = list01[1:2]
list02[0].color = "粉色"
for item in list01:
    print(item.color)  # 红色, 粉色, 蓝色
| [
"1261247299@qq.com"
] | 1261247299@qq.com |
0a67a16f0095e36e51adb2684562ff0c065fcc95 | 8eab8ab725c2132bb8d090cdb2d23a5f71945249 | /virt/Lib/site-packages/jedi/third_party/typeshed/stdlib/2and3/termios.pyi | 9eecbf68136fb417c1fead051c8b49eecdbc598f | [
"MIT",
"Apache-2.0"
] | permissive | JoaoSevergnini/metalpy | 6c88a413a82bc25edd9308b8490a76fae8dd76ca | c2d0098a309b6ce8c756ff840bfb53fb291747b6 | refs/heads/main | 2023-04-18T17:25:26.474485 | 2022-09-18T20:44:45 | 2022-09-18T20:44:45 | 474,773,752 | 3 | 1 | MIT | 2022-11-03T20:07:50 | 2022-03-27T22:21:01 | Python | UTF-8 | Python | false | false | 3,504 | pyi | from _typeshed import FileDescriptorLike
from typing import Any, List, Union
_Attr = List[Union[int, List[Union[bytes, int]]]]
# TODO constants not really documented
B0: int
B1000000: int
B110: int
B115200: int
B1152000: int
B1200: int
B134: int
B150: int
B1500000: int
B1800: int
B19200: int
B200: int
B2000000: int
B230400: int
B2400: int
B2500000: int
B300: int
B3000000: int
B3500000: int
B38400: int
B4000000: int
B460800: int
B4800: int
B50: int
B500000: int
B57600: int
B576000: int
B600: int
B75: int
B921600: int
B9600: int
BRKINT: int
BS0: int
BS1: int
BSDLY: int
CBAUD: int
CBAUDEX: int
CDSUSP: int
CEOF: int
CEOL: int
CEOT: int
CERASE: int
CFLUSH: int
CIBAUD: int
CINTR: int
CKILL: int
CLNEXT: int
CLOCAL: int
CQUIT: int
CR0: int
CR1: int
CR2: int
CR3: int
CRDLY: int
CREAD: int
CRPRNT: int
CRTSCTS: int
CS5: int
CS6: int
CS7: int
CS8: int
CSIZE: int
CSTART: int
CSTOP: int
CSTOPB: int
CSUSP: int
CWERASE: int
ECHO: int
ECHOCTL: int
ECHOE: int
ECHOK: int
ECHOKE: int
ECHONL: int
ECHOPRT: int
EXTA: int
EXTB: int
FF0: int
FF1: int
FFDLY: int
FIOASYNC: int
FIOCLEX: int
FIONBIO: int
FIONCLEX: int
FIONREAD: int
FLUSHO: int
HUPCL: int
ICANON: int
ICRNL: int
IEXTEN: int
IGNBRK: int
IGNCR: int
IGNPAR: int
IMAXBEL: int
INLCR: int
INPCK: int
IOCSIZE_MASK: int
IOCSIZE_SHIFT: int
ISIG: int
ISTRIP: int
IUCLC: int
IXANY: int
IXOFF: int
IXON: int
NCC: int
NCCS: int
NL0: int
NL1: int
NLDLY: int
NOFLSH: int
N_MOUSE: int
N_PPP: int
N_SLIP: int
N_STRIP: int
N_TTY: int
OCRNL: int
OFDEL: int
OFILL: int
OLCUC: int
ONLCR: int
ONLRET: int
ONOCR: int
OPOST: int
PARENB: int
PARMRK: int
PARODD: int
PENDIN: int
TAB0: int
TAB1: int
TAB2: int
TAB3: int
TABDLY: int
TCFLSH: int
TCGETA: int
TCGETS: int
TCIFLUSH: int
TCIOFF: int
TCIOFLUSH: int
TCION: int
TCOFLUSH: int
TCOOFF: int
TCOON: int
TCSADRAIN: int
TCSAFLUSH: int
TCSANOW: int
TCSBRK: int
TCSBRKP: int
TCSETA: int
TCSETAF: int
TCSETAW: int
TCSETS: int
TCSETSF: int
TCSETSW: int
TCXONC: int
TIOCCONS: int
TIOCEXCL: int
TIOCGETD: int
TIOCGICOUNT: int
TIOCGLCKTRMIOS: int
TIOCGPGRP: int
TIOCGSERIAL: int
TIOCGSOFTCAR: int
TIOCGWINSZ: int
TIOCINQ: int
TIOCLINUX: int
TIOCMBIC: int
TIOCMBIS: int
TIOCMGET: int
TIOCMIWAIT: int
TIOCMSET: int
TIOCM_CAR: int
TIOCM_CD: int
TIOCM_CTS: int
TIOCM_DSR: int
TIOCM_DTR: int
TIOCM_LE: int
TIOCM_RI: int
TIOCM_RNG: int
TIOCM_RTS: int
TIOCM_SR: int
TIOCM_ST: int
TIOCNOTTY: int
TIOCNXCL: int
TIOCOUTQ: int
TIOCPKT: int
TIOCPKT_DATA: int
TIOCPKT_DOSTOP: int
TIOCPKT_FLUSHREAD: int
TIOCPKT_FLUSHWRITE: int
TIOCPKT_NOSTOP: int
TIOCPKT_START: int
TIOCPKT_STOP: int
TIOCSCTTY: int
TIOCSERCONFIG: int
TIOCSERGETLSR: int
TIOCSERGETMULTI: int
TIOCSERGSTRUCT: int
TIOCSERGWILD: int
TIOCSERSETMULTI: int
TIOCSERSWILD: int
TIOCSER_TEMT: int
TIOCSETD: int
TIOCSLCKTRMIOS: int
TIOCSPGRP: int
TIOCSSERIAL: int
TIOCSSOFTCAR: int
TIOCSTI: int
TIOCSWINSZ: int
TOSTOP: int
VDISCARD: int
VEOF: int
VEOL: int
VEOL2: int
VERASE: int
VINTR: int
VKILL: int
VLNEXT: int
VMIN: int
VQUIT: int
VREPRINT: int
VSTART: int
VSTOP: int
VSUSP: int
VSWTC: int
VSWTCH: int
VT0: int
VT1: int
VTDLY: int
VTIME: int
VWERASE: int
XCASE: int
XTABS: int
# Stub signatures for the C-implemented POSIX terminal-control functions.
def tcgetattr(fd: FileDescriptorLike) -> List[Any]: ...  # read the terminal attributes of fd
def tcsetattr(fd: FileDescriptorLike, when: int, attributes: _Attr) -> None: ...  # apply attributes; `when` is TCSANOW/TCSADRAIN/TCSAFLUSH (defined above)
def tcsendbreak(fd: FileDescriptorLike, duration: int) -> None: ...  # transmit a break condition
def tcdrain(fd: FileDescriptorLike) -> None: ...  # block until all queued output is transmitted
def tcflush(fd: FileDescriptorLike, queue: int) -> None: ...  # discard queued data; `queue` is TCIFLUSH/TCOFLUSH/TCIOFLUSH
def tcflow(fd: FileDescriptorLike, action: int) -> None: ...  # suspend/resume I/O; `action` is TCOOFF/TCOON/TCIOFF/TCION
class error(Exception): ...  # raised by the functions above on failure
| [
"joao.a.severgnini@gmail.com"
] | joao.a.severgnini@gmail.com |
15b28aeeba051cdcd4d812f93e12f48fc059c25c | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_335/ch25_2019_08_21_17_45_54_086418.py | 55459b6fc9131d41cddf928dbe752d80e4db31d4 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | py | def PrecoPassagem(d):
if (d<=200):
v = d*0.50
return v
else:
v = 100 + ((d - 200)*0.45)
return v
distancia = int(input("Quantos km você deseja percorrer? "))
print ("Você pagará R$: {0:.2f}".format(PrecoPassagem(distancia))) | [
"you@example.com"
] | you@example.com |
6158b06b89565bf9aeb58c8fca215bd7dc836d5e | 585e04dbc338efb5a9f8861e9970bd9bfc224f44 | /src/Display/GraphTest.py | 331754a58b32bf159995c2d1e315dbe54b4d42e0 | [] | no_license | CONNJUR/PyScheduler | 34dba79baf881216dfd06a1421849603f35b145f | 150f60495d5a0b86bb211f4c5d691e7d79a9e0b7 | refs/heads/master | 2021-01-23T08:52:46.891768 | 2014-04-26T15:37:03 | 2014-04-26T15:37:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 379 | py | '''
Created on Feb 13, 2011
@author: matt
'''
import Graph as g
import Tkinter as Tk
def displayPoints(schedule, xmax, ymax):
    """Open a Tk window and plot every point of *schedule* on a 2-D graph.

    Blocks inside Tk's event loop until the window is closed.
    schedule: object exposing getPoints() -- presumably a sampling
    schedule of 2-D points; confirm against callers.
    xmax, ymax: axis bounds handed to Graph2D.
    """
    frame = Tk.Tk()
    graph = g.Graph2D(frame, xmax, ymax)
    frame.title('Schedule')
    frame.geometry('700x680+150+5')  # fixed 700x680 window at screen offset (150, 5)
    for point in schedule.getPoints():
        graph.addPoint(point)
    frame.mainloop()  # enter the event loop; returns when the window closes
| [
"mfenwick100@gmail.com"
] | mfenwick100@gmail.com |
63f62ab97ea33e088f5603bb0393134aad47a6c7 | 40948fe9f867ac5ebe4937ff2922449e79cd26dc | /lib/redis_coon/__init__.py | e7d1975825885ba64581e230bd8e9b71aef76a04 | [] | no_license | Suyn/bz_program | 706910df2120eb722346753f72595e1088c579ea | a691e79f692cc47f6ffc912a4be11e63d5d1f14f | refs/heads/master | 2020-04-16T13:04:25.543202 | 2019-01-14T06:17:39 | 2019-01-14T06:17:39 | 165,191,625 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/1/9 16:43
# @Author : Liquid
# @Site :
# @File : __init__.py.py
# @Software: PyCharm
| [
"myEmail@example.com"
] | myEmail@example.com |
f9268273cc0b87383f904db3290a20fa761bb391 | 6a7e9e0e9c08132166f566bd88ae1c46ff8f9c0a | /azure-cognitiveservices-language-luis/azure/cognitiveservices/language/luis/authoring/models/closed_list_model_update_object.py | c9a72314d9bf830200d9a7923870beb88a423067 | [
"MIT"
] | permissive | ashirey-msft/azure-sdk-for-python | d92381d11c48f194ec9f989f5f803db614fb73f2 | e04778e13306dad2e8fb044970215bad6296afb6 | refs/heads/master | 2020-03-23T06:05:39.283442 | 2018-09-15T00:18:26 | 2018-09-15T00:18:26 | 141,188,192 | 0 | 1 | MIT | 2018-07-16T20:02:52 | 2018-07-16T20:02:52 | null | UTF-8 | Python | false | false | 1,202 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ClosedListModelUpdateObject(Model):
    """Object model for updating a closed list.
    :param sub_lists: The new sublists for the feature.
    :type sub_lists:
     list[~azure.cognitiveservices.language.luis.authoring.models.WordListObject]
    :param name: The new name of the closed list feature.
    :type name: str
    """
    # msrest serialization map: attribute name -> wire key and wire type.
    # NOTE: this file is AutoRest-generated (see header); manual edits
    # are lost when the code is regenerated.
    _attribute_map = {
        'sub_lists': {'key': 'subLists', 'type': '[WordListObject]'},
        'name': {'key': 'name', 'type': 'str'},
    }
    def __init__(self, **kwargs):
        super(ClosedListModelUpdateObject, self).__init__(**kwargs)
        self.sub_lists = kwargs.get('sub_lists', None)  # defaults to None when omitted
        self.name = kwargs.get('name', None)
| [
"lmazuel@microsoft.com"
] | lmazuel@microsoft.com |
661b70ba496cfd5771df2c917106168deccf0c0c | 11aaeaeb55d587a950456fd1480063e1aed1d9e5 | /.history/ex45-test_20190609121219.py | 2c116329785b368dc1068c42260ea98709da4189 | [] | no_license | Gr4cchus/Learn-Python-3-The-Hard-Way | 8ce9e68f6a91ea33ea45fe64bfff82d65422c4a8 | f5fa34db16cdd6377faa7fcf45c70f94bb4aec0d | refs/heads/master | 2020-05-17T23:18:29.483160 | 2019-06-26T18:42:52 | 2019-06-26T18:42:52 | 184,023,439 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 690 | py |
class Room1():
def enter():
print("You enter room 1")
class Room2():
def enter():
print("You enter room 2")
class Map():
def __init__(self, starting_room):
self.starting_room = starting_room
self.locations = {
'room1': Room1(),
'room2': Room2()
}
def returnit(self):
print(self.locations.get(self.starting_room))
return self.locations.get(self.starting_room)
class Engine():
def __init__(self, map):
self.map = map
def play(self):
while True:
print(self.map.returnit)
themap = Map('room1')
# theengine = Engine(themap)
# theengine.play()
print | [
"ahivent@gmail.com"
] | ahivent@gmail.com |
7b7c0674a9d8be762b5af182046205d28a4ae0c9 | eeb7e70b0b68decbdcb32682351e54e0be99a5b0 | /kaggle/python_files/sample453.py | ad3779f42d0ce7478cd0feb47ba8bbd3c057db7b | [] | no_license | SocioProphet/CodeGraph | 8bafd7f03204f20da8f54ab23b04f3844e6d24de | 215ac4d16d21d07e87964fe9a97a5bf36f4c7d64 | refs/heads/master | 2023-02-16T02:51:27.791886 | 2021-01-15T07:00:41 | 2021-01-15T07:00:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,934 | py | #!/usr/bin/env python
# coding: utf-8
# *=== Please read also the comments below, in case Kagglers write updates to this post ===*
# <br>
# <br>
# If you're new to Kaggle kernels, you may wonder how to create an output file. Perhaps you have already run in your notebook a function like .to_csv, but **you don't see your file anywhere**? I had the same problem. **You need to commit your notebook**. There is a Commit button in the top-right corner of the main pane of your notebook.
# <br>
# <br>To create a file from scratch, step-by-step, please read on, or fork and execute this notebook.
# <br>
# <br>Let's say you started your first kernel based on Titanic dataset, by going to the <a href="https://www.kaggle.com/c/titanic/kernels">Kernels</a> tab and clicking New Kernel button. You would see something like this:
# In[ ]:
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
from subprocess import check_output
print(check_output(["ls", "../input"]).decode("utf8"))
# Any results you write to the current directory are saved as output.
# You are going to read the test set input file, make a very rough prediction (a simple rule "all females survive, no males survive"), and create a simple dataframe with results you would like to submit.
# In[ ]:
test = pd.read_csv('../input/test.csv')
test['Survived'] = 0
test.loc[test['Sex'] == 'female','Survived'] = 1
data_to_submit = pd.DataFrame({
'PassengerId':test['PassengerId'],
'Survived':test['Survived']
})
# Now that you have your dataframe, you would like to export it as a csv file, like this:
# In[ ]:
data_to_submit.to_csv('csv_to_submit.csv', index = False)
# Everything runs smoothly, but the problem is you can't see your file anywhere in this page, nor in your Profile, Kernels tab, nowhere! This is because you haven't commited your notebook yet. To do that, **click the Commit button** - as I write it, this is a light-blue button in the top-right corner of my notebook page, in the main pane. (There is also a right pane with Sessions, Versions etc. You can ignore it for now). It may take a minute for the Kaggle server to publish your notebook.
# <br>
# <br>When this operation is done, you can go back by clicking '<<' button in the top-left corner. Then you should see your notebook with a top bar that has a few tabs: Notebook, Code, Data, **Output**, Comments, Log ... Edit Notebook.
# Click the Output tab. You should see your output csv file there, ready to download!
| [
"mccusker@gmail.com"
] | mccusker@gmail.com |
711f21cc4cd6f0bf21f392cac68b5ceb4054c50e | ce0f8956c4c308c67bd700d31fe8d5a17b16ac08 | /Python3/src/15 Threading and Concurrency/Threading/05-sharing-data-3.py | d5ca22185670820f47a50c33bf733153a91ad842 | [] | no_license | seddon-software/python3 | 795ae8d22a172eea074b71d6cd49d79e388d8cc6 | d5e6db1509a25c1a3040d5ae82d757539a2ff730 | refs/heads/master | 2021-07-10T15:48:31.893757 | 2020-07-16T20:29:22 | 2020-07-16T20:29:22 | 175,872,757 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 918 | py | ############################################################
#
# sharing data
#
############################################################
from threading import Thread
from threading import Lock
import random
import time
import sys
# use the with statement to create an implicit try block
# the lock will be released even if an exception is thrown
class MyClass:
    """Callable worker that hammers two shared counters to demo locking.

    count1 is incremented WITHOUT the lock: `+= 1` is a read-modify-write,
    so concurrent updates can be lost and the final total may fall short.
    count2 is incremented inside `with lock:`, so it ends at the exact
    expected total. The `with` statement releases the lock even if an
    exception is raised (see the file-header comments).
    """
    def __call__(self, name):
        global lock, count1, count2
        for i in range(0, 2*1000*1000):
            count1 += 1  # unsynchronized: updates may be lost
            with lock:  # lock released automatically, even on exception
                count2 += 1
lock = Lock()
count1 = 0  # incremented without the lock (expected to come up short)
count2 = 0  # incremented under the lock (expected to be exact)
m1 = MyClass()
m2 = MyClass()
m3 = MyClass()
# Three worker threads, 2,000,000 increments each -> expected total 6,000,000.
t1 = Thread(target = m1, args = ("1",))
t2 = Thread(target = m2, args = ("2",))
t3 = Thread(target = m3, args = ("3",))
t1.start()
t2.start()
t3.start()
t1.join()  # wait for every worker before reading the counters
t2.join()
t3.join()
print("count1: " + str(count1))
print("count2: " + str(count2))
print("\nEnd of main")
| [
"seddon-software@keme.co.uk"
] | seddon-software@keme.co.uk |
d86561bfdf7942d11d55ac36cc529fe5e11f5802 | 35a30c54193fc790b41085eb0e7b2fdd0e06dc91 | /basic_python/basics/more_db.py | cabe081d49542706ffab508e99935c99c77c89ae | [] | no_license | kooshanfilm/Python | fbf479da44565f1a2cba4ee78648bbe05892fc2b | e3a28631d7eeb8cf3ccc8f0f8ebb73d7c198df67 | refs/heads/master | 2023-02-19T16:29:28.724367 | 2022-02-19T22:11:56 | 2022-02-19T22:11:56 | 141,833,237 | 0 | 1 | null | 2023-02-07T22:27:49 | 2018-07-21T17:32:33 | Python | UTF-8 | Python | false | false | 1,468 | py | #!/usr/local/bin/python3.4
# By Amir Hassan Azimi [http://parsclick.net/]
import sqlite3
def insert(db, row):
    """Add one row to the test table and commit.

    db: open sqlite3 connection; row: mapping with keys 't1' (text)
    and 'i1' (int).
    """
    params = (row['t1'], row['i1'])
    db.execute('insert into test (t1, i1) values (?, ?)', params)
    db.commit()
def retrieve(db, t1):
    """Return the first row whose t1 column equals *t1*, or None if absent."""
    return db.execute('select * from test where t1 = ?', (t1,)).fetchone()
def update(db, row):
    """Set i1 for the row keyed by t1, then commit.

    row: mapping with keys 't1' (row key) and 'i1' (new value).
    """
    values = (row['i1'], row['t1'])
    db.execute('update test set i1 = ? where t1 = ?', values)
    db.commit()
def delete(db, t1):
    """Remove every row whose t1 column equals *t1*, then commit."""
    key = (t1,)
    db.execute('delete from test where t1 = ?', key)
    db.commit()
# EMH  (stray token neutralized: as a bare name it would raise NameError at import)
def disp_rows(db):
    """Print every row of the test table, ordered by t1.

    Requires db.row_factory = sqlite3.Row (set in main()): the body
    indexes rows by column name, which plain tuple rows do not support.
    """
    cursor = db.execute('select * from test order by t1')
    for row in cursor:
        print('  {}: {}'.format(row['t1'], row['i1']))
def main():
    """Exercise the CRUD helpers against a throwaway test.db database."""
    db = sqlite3.connect('test.db')  # creates/opens test.db in the current directory
    db.row_factory = sqlite3.Row  # name-based row access; disp_rows and dict(row) rely on it
    print('Create table test')
    db.execute('drop table if exists test')
    db.execute('create table test ( t1 text, i1 int )')
    print('Create rows')
    insert(db, dict(t1 = 'one', i1 = 1))
    insert(db, dict(t1 = 'two', i1 = 2))
    insert(db, dict(t1 = 'three', i1 = 3))
    insert(db, dict(t1 = 'four', i1 = 4))
    disp_rows(db)
    print('Retrieve rows')
    print(dict(retrieve(db, 'one')), dict(retrieve(db, 'two')))
    print('Update rows')
    update(db, dict(t1 = 'one', i1 = 101))
    update(db, dict(t1 = 'three', i1 = 103))
    disp_rows(db)
    print('Delete rows')
    delete(db, 'one')
    delete(db, 'three')
    disp_rows(db)
if __name__ == "__main__": main()
| [
"kooshanfilm@hotmail.com"
] | kooshanfilm@hotmail.com |
031d81b2d0c6f4baa7ca6095d7bbba85ae54328c | e5565fac8a04642fadf1da0d6bc54a3d0ea1c5a1 | /flask/auxiliar.py | e8803a60b6b61ca743db98ac7766b96ff2e9637d | [
"Unlicense"
] | permissive | ttm/pnud4 | 86b1c7988ae80f9ed9277dec838f62b16746dd85 | 89e1fd866dbdea7afcb3d1020816370e303f258c | refs/heads/master | 2021-01-02T23:07:58.939440 | 2014-09-15T22:50:06 | 2014-09-15T22:50:06 | 23,091,064 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,873 | py | #-*- coding: utf8 -*-
from SPARQLWrapper import SPARQLWrapper, JSON
from configuracao import *
import string, networkx as x, nltk as k
import __builtin__
stemmer = k.stem.RSLPStemmer()
def fazRedeAmizades():
global SPARQLWrapper
q="""SELECT ?a ?b ?aname ?bname
WHERE {
?a foaf:knows ?b .
}"""
sparql=SPARQLWrapper(URL_ENDPOINT_)
sparql.setQuery(PREFIX+q)
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
g=x.Graph()
for amizade in results["results"]["bindings"]:
nome1=amizade["a"]["value"]
nome2=amizade["b"]["value"]
g.add_edge(nome1,nome2)
__builtin__.g=g
def fazRedeInteracao():
q="""SELECT ?participante1 ?participante2 ?aname ?bname
WHERE {
?comentario dc:type tsioc:Comment.
?participante1 ops:performsParticipation ?comentario.
?participante1 foaf:name ?aname.
?artigo sioc:has_reply ?comentario.
?participante2 ops:performsParticipation ?artigo.
?participante2 foaf:name ?bname.
}"""
sparql=SPARQLWrapper(URL_ENDPOINT_)
sparql.setQuery(PREFIX+q)
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
d=x.DiGraph()
for interacao in results["results"]["bindings"]:
nome_chegada=interacao["participante1"]["value"]
nome_partida=interacao["participante2"]["value"]
if (nome_partida,nome_chegada) in d.edges():
d[nome_partida][nome_chegada]["weight"]+=1
else:
d.add_edge(nome_partida,nome_chegada,weight=1.)
__builtin__.d=d
def fazBoW():
"""Faz Bag of Words de todos os comentários e artigos do site"""
q="SELECT ?cbody ?titulo ?abody WHERE \
{?foo ops:performsParticipation ?participacao.\
OPTIONAL { ?participacao schema:articleBody ?abody. }\
OPTIONAL {?participacao dc:title ?titulo . }\
OPTIONAL {?participacao schema:text ?cbody .}}"
sparql=SPARQLWrapper(URL_ENDPOINT_)
sparql.setQuery(PREFIX+q)
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
msgs_=results["results"]["bindings"]
msgs=[mm for mm in msgs_ if ("titulo" not in mm.keys()) or
(("teste de stress" not in mm["titulo"]["value"].lower())
or ("cbody" not in mm.keys() or ("comunidade de desenvolvedores e nesse caso, quanto mais"
not in mm["cbody"]["value"].lower())))]
textos1=[i["cbody"]["value"] for i in msgs if "cbody" in i.keys()]
textos2=[i["abody"]["value"] for i in msgs if "abody" in i.keys()]
textos=textos1+textos2
# faz BoW e guarda num dict
texto=string.join(textos).lower()
texto_= ''.join(ch for ch in texto if ch not in EXCLUDE)
texto__=texto_.split()
#texto___=[stemmer.stem(pp) for pp in texto__]
texto___=[stemmer.stem(pp) for pp in texto__ if (pp not in STOPWORDS) and (not pp.isdigit())]
fdist=k.FreqDist(texto___)
radicais_escolhidos=fdist.keys()[:400]
__builtin__.radicais_escolhidos=radicais_escolhidos
__builtin__.bow=fdist
def fazBoWs():
"""Faz Bag of Words de cada usuário"""
# puxa todos os usuarios
q="""SELECT DISTINCT ?participante
WHERE {
?foo dc:contributor ?participante .
}"""
sparql=SPARQLWrapper(URL_ENDPOINT_)
sparql.setQuery(PREFIX+q)
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
participantes_=results["results"]["bindings"]
participantes=[i["participante"]["value"] for i in participantes_]
# inicia loop
if "radicais_escolhidos" not in dir(__builtin__):
print(u"rode BoW antes, para saber do vocabulário geral do portal")
else:
radicais_escolhidos=__builtin__.radicais_escolhidos
bows={}
for participante in participantes:
# puxa todos os comentarios de cada usuario
# e os article bodys
q="""SELECT DISTINCT ?abody ?cbody
WHERE {
<%s> ops:performsParticipation ?participacao.
OPTIONAL { ?participacao schema:articleBody ?abody. }
OPTIONAL { ?participacao schema:text ?cbody. }
OPTIONAL {?comentario dc:title ?titulo . }
}"""%(participante,)
sparql = SPARQLWrapper("http://localhost:82/participabr/query")
sparql.setQuery(PREFIX+q)
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
results_=results["results"]["bindings"]
results__=[mm for mm in results_ if ("titulo" not in mm.keys()) or
(("teste de stress" not in mm["titulo"]["value"].lower())
or ("cbody" not in mm.keys() or ("comunidade de desenvolvedores e nesse caso, quanto mais"
not in mm["cbody"]["value"].lower())))]
textos1=[i["cbody"]["value"] for i in results__ if "cbody" in i.keys()]
textos2=[i["abody"]["value"] for i in results__ if "abody" in i.keys()]
textos=textos1+textos2
# faz BoW e guarda num dict
texto=string.join(textos).lower()
texto_= ''.join(ch for ch in texto if ch not in EXCLUDE)
texto__=texto_.split()
texto___=[stemmer.stem(pp) for pp in texto__ if pp not in STOPWORDS]
fdist=k.FreqDist(texto___)
ocorrencias=[fdist[i] for i in radicais_escolhidos]
bows[participante]=(fdist,ocorrencias)
__builtin__.bows=bows
def fazBoWsC():
"""Faz Bag of Words de cada comunidade
Por hora, há duas bag of words para cada comunidade:
*) Média das bag of words de cada participante
*) Bag of words de todos os textos da comunidade"""
if "bows" not in dir(__builtin__):
return "execute fazBoWs() primeiro"
# puxar participantes de cada comunidade
# fazer media dos bows deles
# puxar texto relacionado a cada comunidade
# fazer bow
| [
"renato.fabbri@gmail.com"
] | renato.fabbri@gmail.com |
29599983d99ecf7c0beb4b5ffa2d3fbad0885f2b | 897871d09b8b1e86c5a48599839ba9534260f2c9 | /aromawine3-new_update__with_checkout/payment_method/admin.py | 13faee6d60754243b61ec6690004079549f71e01 | [] | no_license | sidkushwah123/wine | 0b8b8fdf44068b4488d5f1ae5d34a24d3fff19a9 | bb29e84fb4a0709aca36e819ae6191147a9691b5 | refs/heads/main | 2023-07-27T14:03:06.814484 | 2021-09-11T15:25:39 | 2021-09-11T15:25:39 | 405,354,798 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | from django.contrib import admin
from .models import AwPaymentMethod
from import_export.admin import ImportExportModelAdmin
# Register your models here.
class AwPaymentMethodAdmin(ImportExportModelAdmin):
    """Admin page for AwPaymentMethod with import/export support.

    NOTE(review): Card_Number and CVC_CVV are displayed in the admin
    list view -- showing (and storing) raw card data is a compliance
    concern; confirm this is intended.
    """
    list_display = ('Name_on_Card','Card_Number','Expiry_Date','CVC_CVV','ZIP','Created_by','Created_date')
    list_filter = ('Created_by',)
admin.site.register(AwPaymentMethod,AwPaymentMethodAdmin) | [
"sachinkushwah0007@gmail.com"
] | sachinkushwah0007@gmail.com |
4577736c5d74a779f6b69196e1839be30b3b2d43 | a50e4fb1440b2c0638b3c72f343de62734caf194 | /healthybills/settings.py | 66abb14b0adf3810de757fa3ae652d7bbd188ec5 | [] | no_license | MarkyAaronYoung/healthy-bills-server | 168226ef60254021b501edf47d391a6a559488b3 | cd615e3a1940987ca58be07fa6a5b5cf60a1b110 | refs/heads/main | 2023-03-25T22:30:23.189937 | 2021-03-25T14:06:38 | 2021-03-25T14:06:38 | 345,149,493 | 0 | 0 | null | 2021-03-25T14:06:39 | 2021-03-06T17:08:21 | Python | UTF-8 | Python | false | false | 3,637 | py | """
Django settings for healthybills project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '32hc)(z++fr8m9fyz54&-h+_=5j3m2p76-vejcc+!&uih^(o2a'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'corsheaders',
'healthybillsapi',
]
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAuthenticated',
],
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
'PAGE_SIZE': 10
}
CORS_ORIGIN_WHITELIST = (
'http://localhost:3000',
'http://127.0.0.1:3000'
)
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'healthybills.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'healthybills.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| [
"markyaaronyoung@gmail.com"
] | markyaaronyoung@gmail.com |
6ff072a6e2147a1cd4036db9c351a5c509cedd3d | 8ab2c7d0664a9c4b2d400f330e5516990eabd9a1 | /posts/migrations/0017_post_likes.py | 6ffa81a0505573735c6c29c9cc88883d8f598210 | [] | no_license | mahmud-sajib/Django-Blog | d335f759279feef712738126477bcb288e1aa5a9 | cba9f692dde1d50878cdfded4f36d1fd76bde253 | refs/heads/master | 2023-04-27T14:00:23.364276 | 2020-08-28T09:55:34 | 2020-08-28T09:55:34 | 240,199,312 | 2 | 1 | null | 2023-04-21T20:48:38 | 2020-02-13T07:13:52 | CSS | UTF-8 | Python | false | false | 421 | py | # Generated by Django 2.2 on 2020-03-07 04:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('posts', '0016_auto_20200306_2033'),
]
operations = [
migrations.AddField(
model_name='post',
name='likes',
field=models.ManyToManyField(blank=True, related_name='likes', to='posts.Author'),
),
]
| [
"shout.mahmud@gmail.com"
] | shout.mahmud@gmail.com |
9aa21be925718892c331a7da4ca05d100cfe084e | 8c10c35b247ad417a0e3893b08a890e2c70055a5 | /backend/comedianModel/models.py | 4f5814e86a91c67a111940cec375c42147232207 | [] | no_license | crowdbotics-apps/test14-28665 | d06576c969ca12f92169ee0404562366777c2a50 | 182788839dd9b1149196428183ee91dfd67231a3 | refs/heads/master | 2023-06-19T02:45:40.322924 | 2021-07-09T16:55:29 | 2021-07-09T16:55:29 | 384,499,526 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py | from django.conf import settings
from django.db import models
class Comedians(models.Model):
    "Generated Model"
    # Display name of the comedian.
    name = models.CharField(
        max_length=256,
    )
class Jokes(models.Model):
    "Generated Model"
    # Text of the joke.
    joke = models.CharField(
        max_length=256,
    )
    # Author of the joke; PROTECT blocks deleting a comedian who still has jokes.
    comedian = models.ForeignKey(
        "comedianModel.Comedians",
        on_delete=models.PROTECT,
        related_name="jokes_comedian",
    )
# Create your models here.
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
49acfc58754069e15a3218eee7d216363410dc11 | e5dae0f531f3aa6cfd3c6c122a4bbf8b37872d46 | /check_apk.py | 15b73755b48bb5b01620a19cb6005787fad3ff1d | [] | no_license | Rafiot/stalkerware-indicators | cc67d0fa10c63e05ff8ece5024dfd45b2c917ef2 | eb832dcec7677adf739e73845dae285c3c8fd1cd | refs/heads/master | 2022-10-29T11:42:42.232554 | 2020-06-21T15:32:30 | 2020-06-21T15:32:30 | 273,949,498 | 1 | 0 | null | 2020-06-21T17:09:51 | 2020-06-21T17:09:51 | null | UTF-8 | Python | false | false | 3,282 | py | import argparse
import os
import sys
import yaml
import hashlib
from androguard.core.bytecodes.apk import APK
def load_indicators(file_path: str) -> dict:
    """Load stalkerware indicators from the files under *file_path*.

    Returns a dict with keys:
      'appids', 'certificates' -- parsed YAML (BaseLoader: plain strings
      only, no object construction);
      'network' -- list of {'type', 'value', 'name'} rows from network.csv;
      'sha256'  -- list of {'value', 'name'} rows from sha256.csv.
    """
    indicators = {}
    with open(os.path.join(file_path, 'appid.yaml')) as f:
        indicators['appids'] = yaml.load(f, Loader=yaml.BaseLoader)
    with open(os.path.join(file_path, 'certificates.yaml')) as f:
        indicators['certificates'] = yaml.load(f, Loader=yaml.BaseLoader)
    with open(os.path.join(file_path, 'network.csv')) as f:
        data = f.read().split('\n')
        indicators['network'] = []
        # Keep only 'domain'/'ip' rows; header and blank lines fail this test.
        for d in data:
            dd = d.strip().split(',')
            if dd[0] in ['domain', 'ip']:
                indicators['network'].append({
                    'type': dd[0],
                    'value': dd[1],
                    'name': dd[2]
                })
    with open(os.path.join(file_path, 'sha256.csv')) as f:
        data = f.read().split('\n')
        indicators['sha256'] = []
        # Skip the 'Hash,...' header and any line without exactly two columns.
        for d in data:
            dd = d.strip().split(',')
            if dd[0] != 'Hash' and len(dd) == 2:
                indicators['sha256'].append({
                    'value': dd[0],
                    'name': dd[1]
                })
    return indicators
def search(value: str, db: list, column: str) -> str:
    """Look *value* up (case-insensitively) in *db* under *column*.

    db: list of dicts, each carrying *column* and a 'name' key.
    Returns the matching entry's 'name', or None when nothing matches.
    """
    needle = value.lower()
    for entry in db:
        if entry[column].lower() == needle:
            return entry['name']
    return None
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Check an APK for known malicious indicators')
parser.add_argument('APK', help='APK File')
args = parser.parse_args()
if not os.path.isfile(args.APK):
print("This file does not exist")
sys.exit(-1)
indicator_path = os.path.dirname(os.path.abspath(__file__))
indicators = load_indicators(indicator_path)
print("Checking this APK over {} app ids, {} certificates, {} network indicators and {} hashes".format(len(indicators['appids']), len(indicators['certificates']), len(indicators['network']), len(indicators['sha256'])))
# TODO implement check for a folder
# Checking hash
m = hashlib.sha256()
with open(args.APK, 'rb') as f:
data = f.read()
m.update(data)
res = search(m.hexdigest(), indicators['sha256'], 'value')
print("SHA256: {}".format(m.hexdigest()))
if res:
print("Known Stalkerware hash: {}".format(res))
else:
print("App hash not in the indicator database")
print("")
apk = APK(args.APK)
res = search(apk.get_package(), indicators['appids'], 'package')
print("Package id: {}".format(apk.get_package()))
if res:
print("Known stalkerware package id: {}".format(res))
else:
print("Package id not in the indicators")
print("")
if len(apk.get_certificates()) > 0:
cert = apk.get_certificates()[0]
sha1 = cert.sha1_fingerprint.replace(' ', '')
print("Certificate: {}".format(sha1))
res = search(sha1, indicators['certificates'], 'certificate')
if res:
print("Known Stalkerware certificate: {}".format(res))
else:
print("Certificate not in the indicators")
else:
print("No certificate in this APK")
# TODO : add rules and androguard rules
| [
"tek@randhome.io"
] | tek@randhome.io |
e555059f4b9fad75ec4400c0decfcf6294982ccb | 6c137e70bb6b1b618fbbceddaeb74416d387520f | /pyqtgraph/examples/GLImageItem.py | 581474fd18d616485b254e71b69d1e8cde075c2a | [
"BSD-2-Clause",
"MIT"
] | permissive | zhong-lab/code | fe497c75662f8c3b7ab3c01e7e351bff6d5e8d15 | b810362e06b44387f0768353c602ec5d29b551a2 | refs/heads/master | 2023-01-28T09:46:01.448833 | 2022-06-12T22:53:47 | 2022-06-12T22:53:47 | 184,670,765 | 2 | 7 | BSD-2-Clause | 2022-12-08T21:46:15 | 2019-05-02T23:37:39 | Python | UTF-8 | Python | false | false | 1,746 | py | # -*- coding: utf-8 -*-
"""
Use GLImageItem to display image data on rectangular planes.
In this example, the image data is sampled from a volume and the image planes
placed as if they slice through the volume.
"""
## Add path to library (just for examples; you do not need this)
import initExample
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph.opengl as gl
import pyqtgraph as pg
import numpy as np
app = QtGui.QApplication([])
w = gl.GLViewWidget()
w.opts['distance'] = 200
w.show()
w.setWindowTitle('pyqtgraph example: GLImageItem')
## create volume data set to slice three images from
shape = (100,100,70)
data = pg.gaussianFilter(np.random.normal(size=shape), (4,4,4))
data += pg.gaussianFilter(np.random.normal(size=shape), (15,15,15))*15
## slice out three planes, convert to RGBA for OpenGL texture
levels = (-0.08, 0.08)
tex1 = pg.makeRGBA(data[shape[0]/2], levels=levels)[0] # yz plane
tex2 = pg.makeRGBA(data[:,shape[1]/2], levels=levels)[0] # xz plane
tex3 = pg.makeRGBA(data[:,:,shape[2]/2], levels=levels)[0] # xy plane
#tex1[:,:,3] = 128
#tex2[:,:,3] = 128
#tex3[:,:,3] = 128
## Create three image items from textures, add to view
v1 = gl.GLImageItem(tex1)
v1.translate(-shape[1]/2, -shape[2]/2, 0)
v1.rotate(90, 0,0,1)
v1.rotate(-90, 0,1,0)
w.addItem(v1)
v2 = gl.GLImageItem(tex2)
v2.translate(-shape[0]/2, -shape[2]/2, 0)
v2.rotate(-90, 1,0,0)
w.addItem(v2)
v3 = gl.GLImageItem(tex3)
v3.translate(-shape[0]/2, -shape[1]/2, 0)
w.addItem(v3)
ax = gl.GLAxisItem()
w.addItem(ax)
## Start Qt event loop unless running in interactive mode.
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
| [
"none"
] | none |
30dc206da3ff7417c076c63484ea339e73e4c54f | 7bc54bae28eec4b735c05ac7bc40b1a8711bb381 | /src/tlm/data_gen/run_unmasked_pair_gen.py | c299a5acd7c979901fa33e9074b373f3d2b44a8e | [] | no_license | clover3/Chair | 755efd4abbd5f3f2fb59e9b1bc6e7bc070b8d05e | a2102ebf826a58efbc479181f1ebb5de21d1e49f | refs/heads/master | 2023-07-20T17:29:42.414170 | 2023-07-18T21:12:46 | 2023-07-18T21:12:46 | 157,024,916 | 0 | 0 | null | 2023-02-16T05:20:37 | 2018-11-10T21:55:29 | Python | UTF-8 | Python | false | false | 1,378 | py | import random
import sys
from cache import *
from job_manager.marked_task_manager import MarkedTaskManager
from tlm.data_gen.lm_datagen import UnmaskedPairGen
working_path ="/mnt/nfs/work3/youngwookim/data/bert_tf"
class Worker:
def __init__(self, out_path):
self.out_dir = out_path
self.gen = UnmaskedPairGen()
def work(self, job_id):
doc_id = job_id
if doc_id > 1000:
doc_id = doc_id % 1000
docs = self.gen.load_doc_seg(doc_id)
output_file = os.path.join(self.out_dir, "{}".format(job_id))
insts = self.gen.create_instances_from_documents(docs)
random.shuffle(insts)
self.gen.write_instance_to_example_files(insts, [output_file])
def main():
mark_path = os.path.join(working_path, "unmasked_pair_x3_mark")
out_path = os.path.join(working_path, "unmasked_pair_x3")
if not os.path.exists(out_path):
os.mkdir(out_path)
mtm = MarkedTaskManager(4000, mark_path, 1)
worker = Worker(out_path)
job_id = mtm.pool_job()
print("Job id : ", job_id)
while job_id is not None:
worker.work(job_id)
job_id = mtm.pool_job()
print("Job id : ", job_id)
def simple():
out_path = os.path.join(working_path, "tf_unmasked")
worker = Worker(out_path)
worker.work(int(sys.argv[1]))
if __name__ == "__main__":
main()
| [
"lesterny@gmail.com"
] | lesterny@gmail.com |
1218803bf87eb7bc922d113422099a6153d4b9ae | 0e834094f5e4274b279939b81caedec7d8ef2c73 | /m1/project_mnth01/gemo2048.py | 843145d70d77f59201f2f88e40b483506471b774 | [] | no_license | SpringSnowB/All-file | b74eaebe1d54e1410945eaca62c70277a01ef0bf | 03485c60e7c07352aee621df94455da3d466b872 | refs/heads/master | 2020-11-27T23:54:36.984555 | 2020-01-21T08:42:21 | 2020-01-21T08:42:21 | 229,651,737 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,074 | py | """
2048 核心算法
"""
#1.定义函数,零元素移动到末尾
__list_merge = [0, 0, 2, 2]
def move_zero():
""" 零元素移动到末尾
"""
while __list_merge[0] ==0:
for i in range(len(__list_merge) - 1):
if __list_merge[i] == 0:
__list_merge[i], __list_merge[i + 1] = __list_merge[i + 1], __list_merge[i]
# for j in range(i+1,len(list_merge)-1):
# list_merge[j-1],list_merge[j] = list_merge[j],list_merge[j-1]
else:
for i in range(len(__list_merge) - 1):
if __list_merge[i] == 0:
__list_merge[i], __list_merge[i + 1] = __list_merge[i + 1], __list_merge[i]
# for j in range(i+1,len(list_merge)-1):
# list_merge[j-1],list_merge[j] = list_merge[j],list_merge[j-1]
def merge_same_element():
"""
合并相同元素
"""
move_zero()
for i in range(len(__list_merge) - 1):
if __list_merge[i] == __list_merge[i + 1]:
__list_merge[i + 1] += __list_merge[i]
__list_merge[i] = 0
move_zero()
merge_same_element()
print(__list_merge)
list_map = [
[2,0,2,0],
[2,4,0,2],
[0,0,2,0],
[2,4,4,2]
]
def left_move():
"""
左移
"""
for i in range(len(list_map)):
global __list_merge
list_merge = list_map[i]
merge_same_element()
def right_move():
"""
右移
"""
for i in range(len(list_map)):
__list_merge[:] = list_map[i][::-1]
merge_same_element()
list_map[i][::-1] = __list_merge[:]
def transposition_matrix():
for r in range(len(list_map)):
for c in range(r):
if r != c:
list_map[r][c], list_map[c][r] = list_map[c][r], list_map[r][c]
def up_move():
transposition_matrix()
left_move()
transposition_matrix()
def down_move():
transposition_matrix()
right_move()
transposition_matrix()
left_move()
print(list_map)
right_move()
print(list_map)
up_move()
print(list_map)
down_move()
print(list_map) | [
"tszxwsb@163.com"
] | tszxwsb@163.com |
2d8fcd0850a499bb52741a35c6b388c439e2c882 | 13f5c66af02a64aa8c5d988e9560b82bcf058fd0 | /use_a_cabeca_python/AthleteList.py | e10bd9802bb655276a7b55d79046f4a6ca44901a | [] | no_license | heitorchang/reading-list | a1090b969d0f16cbc7c0e371671e85dca0bde201 | 3dcfd68cb02179e75216ff459fda693ec1fb8684 | refs/heads/master | 2023-04-27T03:04:28.122341 | 2023-04-21T14:04:20 | 2023-04-21T14:04:20 | 67,825,952 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 713 | py | class AthleteList(list):
def __init__(self, a_name, a_dob=None, a_times=[]):
# list.__init__([]) # appears to be optional
self.name = a_name
self.dob = a_dob
self.extend(a_times)
def top3(self):
return sorted(set(self))[:3]
def test():
k = AthleteList("Kelly", "1997-11-15")
testeql(k.top3(), [])
j = AthleteList("John", "2002-01-29", ["2:01", "3:15", "2:02", "5:22"])
testeql(j.top3(), ["2:01", "2:02", "3:15"])
a1 = AthleteList("A", "", ["1.0", "2.0", "3.0"])
a2 = AthleteList("B", "", ["2.0", "3.0", "4.0"])
all_ath = {}
all_ath[a1.name] = a1
all_ath[a2.name] = a2
ath_name = "A"
print(all_ath[ath_name].top3())
| [
"heitorchang@gmail.com"
] | heitorchang@gmail.com |
5c6e3ec0223dac4075998a312d7139df867bec08 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03724/s824484588.py | 8979fbff9f609574d28ec62f6dbe789a65c69b5a | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | import collections
N, M = map(int,input().split())
A = [0] * M
B = [0] * M
for i in range(M):
A[i], B[i] = input().split()
C = A + B
D = collections.Counter(C)
for i in D:
if D[i] % 2 == 0:
continue
else:
print('NO')
exit()
print('YES')
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
62d6fe75fdf5cb31098bb87c9e4a23b50b00b62e | 75f7dba3115d6e771ff16f8a227289b35c80f00d | /views/second_category_list_table_model.py | 376712fd9653c532ff5456f01ba78c82675ebedd | [] | no_license | ltnghia/video-object-annotation-interface | e801e8c1f0b472af98ede9646025328530dfcc5f | f7b363efe20423a6b1dfa455df2743b71b8d8905 | refs/heads/master | 2023-01-09T17:24:25.973062 | 2020-02-19T13:19:53 | 2020-02-19T13:19:53 | 199,964,408 | 2 | 2 | null | 2022-12-27T15:35:29 | 2019-08-01T02:50:24 | Python | UTF-8 | Python | false | false | 1,552 | py | from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from views.base_table_model import BaseTableModel
class SecondCategoryListTableModel(BaseTableModel):
def __init__(self, parent=None):
QAbstractTableModel.__init__(self, parent)
self.header_labels = ['ID', 'Second Category']
self.table_data = []
# self.table = None
def headerData(self, section, orientation, role=Qt.DisplayRole):
if role == Qt.DisplayRole and orientation == Qt.Horizontal:
return self.header_labels[section]
return QAbstractTableModel.headerData(self, section, orientation, role)
def columnCount(self, parent):
return len(self.header_labels)
def rowCount(self, parent):
return len(self.table_data)
def clear(self):
self.table_data = []
self.layoutChanged.emit()
def append(self, id, name):
self.table_data.append({'id': id,
'name': name})
self.layoutChanged.emit()
def data(self, index, role=Qt.DisplayRole):
if not index.isValid():
return None
row = index.row()
col = index.column()
data_row = self.table_data[row]
if role == Qt.DisplayRole:
content = ''
if col == 0:
content = data_row['id']
elif col == 1:
content = data_row['name']
return content
return QVariant()
def setData(self, index, value, role):
return False
| [
"trungnghiadk@gmail.com"
] | trungnghiadk@gmail.com |
bc9ee56a9c570d8e46fb2b8ba05a4b5994a2eed5 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02830/s301067674.py | dcfb17d8140ff77e16201b26096c1ad052528e47 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | n = int(input())
a, b = map(list, input().split())
if len(a) == len(b):
for i in range(0, len(a)):
print(a[i] + b[i], end = "") | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
e9fab5abba7eff5c66dd8e49ed4e4d3ff5dde702 | 1e18d62167e3924079dd34555253fa11cd8f2d23 | /practice02.py | 3e3e568bea0c0c8f9c20f673477f761606a1549e | [] | no_license | hoyeonkim795/image_processing_project | e19fcc6022095d565da2ef86533c1d29159f5195 | 8ec401f8c1c34fa4cda3f53ba1552b6b329fe564 | refs/heads/master | 2022-11-09T20:50:37.082455 | 2020-06-29T06:43:31 | 2020-06-29T06:43:31 | 275,752,289 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 805 | py | import cv2
import numpy as np
img = cv2.imread('/home/kriskim/Documents/Python/eye.jpg',0)
img = cv2.medianBlur(img,5)
cimg = cv2.cvtColor(img,cv2.COLOR_GRAY2BGR)
circles = cv2.HoughCircles(img,cv2.HOUGH_GRADIENT,1,150,
param1=80,param2=50,minRadius=0,maxRadius=0)
circles = np.uint16(np.around(circles))
for i in circles[0,:]:
# draw the outer circle
cv2.circle(cimg,(i[0],i[1]),i[2],(0,255,0),2)
# draw the center of the circle
cv2.circle(cimg,(i[0],i[1]),2,(0,0,255),3)
cv2.imshow('detected circles',cimg)
k = cv2.waitKey(0)
if k == 27: # wait for ESC key to exit
cv2.destroyAllWindows()
elif k == ord('s'): # wait for 's' key to save and exit
cv2.imwrite('/home/kriskim/Documents/Python/circled_eye.png',cimg)
cv2.destroyAllWindows() | [
"hy940313@gmail.com"
] | hy940313@gmail.com |
1d17bf27e8a12df36303623b6f782d166b6fb889 | 1d60c68f79072236ddb4ef27ed882b4d6e4c1034 | /demo/loops/break_demo.py | bab06e3b9f8edfc6b9d650a1dc69223cee369820 | [] | no_license | srikanthpragada/PYTHON_13_OCT_2020 | 21e9c9ba86fbdd0858d19f38fad20ee2b7786310 | 183fb465d93a6bd3a703479936c1c972f3681f24 | refs/heads/master | 2023-01-19T14:45:46.969376 | 2020-11-25T14:26:50 | 2020-11-25T14:26:50 | 304,349,928 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 162 | py | total = 0
for i in range(1, 6):
num = int(input("Enter number [0 to stop]:"))
if num == 0:
break # Terminate loop
total += num
print(total)
| [
"srikanthpragada@gmail.com"
] | srikanthpragada@gmail.com |
8c184805763226fa12e2f8874c3f3cef738b840d | acfab9011d276323ce4aa24075aee35f470c17b8 | /9.DictionaryandSets/79.2.Dictionary.py | 07b944ca48d73d04d2d7bbbf314a042f7ea2798a | [] | no_license | Engi20/Python-Programming-for-Beginners | ad84d66a5ce9dd4e6ab23acde13f74607b87ead1 | fa02fcd265f8d7145e554267435c7e73ed562e36 | refs/heads/master | 2022-06-07T05:56:17.326070 | 2020-05-02T17:41:20 | 2020-05-02T17:41:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 292 | py | d={'USA':100,'UK':200,'India':300}
#d1 = d + {'Aus':400} Cannot Use
#d.insert('Aus',400) #AttributeError
#d.append('Aus',400) #AttributeError
#d.extend({'Aus':400}) #AttributeError
#print(d[0:2]) #TypeError
print(d)
del(d['UK'])
print(d)
#d.sort() #AttributeError
| [
"noreply@github.com"
] | Engi20.noreply@github.com |
0b397781d98083fbe0cf7e16940847ffc73a0c3e | 06eb243dd14f54445d9d09a2cf9356b398ce5cc8 | /shoe_sale/settings.py | 158251dcdb5166d1aa5e254f6af65f121d0a4d1c | [] | no_license | wgoode3/shoes | 6c4c0ff97c343b65358bda96aff1d7d477d68f6d | 38571a15b81a1c12df859fd51d058639c0c63dd6 | refs/heads/master | 2020-03-23T15:28:49.307455 | 2018-07-20T21:48:48 | 2018-07-20T21:48:48 | 141,751,041 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,118 | py | """
Django settings for shoe_sale project.
Generated by 'django-admin startproject' using Django 2.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '60p631$#9a=azfhv^n3rf(&0duf1%wtr3_2jlzmvciph)p(bj('
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'apps.shoe_app',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'shoe_sale.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'shoe_sale.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
| [
"wgoode3@gmail.com"
] | wgoode3@gmail.com |
cdf68b3e52046785fd22b375763373f8f35affb6 | 96724f89fc16ec2b73896946d7a44e0532618ba6 | /Iwant/plugin.py | 0ff92d98d8a600f03ed7fcd1f1ded0d43d842398 | [] | no_license | Azelphur/Supybot-plugins | d1448a8b7d3281b5ce7e1a25d9956a3dcd908200 | 063f3475022ddd9834b94564bb9bb499aee6b503 | refs/heads/master | 2020-05-20T13:28:40.213224 | 2012-06-27T12:20:01 | 2012-06-27T12:20:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,818 | py | ###
# Copyright (c) 2011, Valentin Lorentz
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import random
import supybot.utils as utils
from supybot.commands import *
import supybot.plugins as plugins
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
from supybot.i18n import PluginInternationalization, internationalizeDocstring
_ = PluginInternationalization('Iwant')
def unserialize(string):
list = string.replace('||', '|').split(' | ')
if '' in list:
list.remove('')
return list
def serialize(list):
if '' in list:
list.remove('')
return ' | '.join([x.replace('|', '||') for x in list])
@internationalizeDocstring
class Iwant(callbacks.Plugin):
"""Add the help for "@plugin help Iwant" here
This should describe *how* to use this plugin."""
@internationalizeDocstring
def iwant(self, irc, msg, args, channel, thing):
"""[<channel>] <thing>
Tell the bot you want the <thing>. <channel> is only needed if you
don't send the message on the channel itself."""
wishlist = unserialize(self.registryValue('wishlist', channel))
if thing in wishlist:
irc.error(_('This thing is already wanted.'))
return
wishlist.append(thing)
self.setRegistryValue('wishlist', serialize(wishlist), channel)
irc.replySuccess()
iwant = wrap(iwant, ['channel', 'something'])
@internationalizeDocstring
def list(self, irc, msg, args, channel):
"""[<channel>]
Returns the list of wanted things for the <channel>. <channel> defaults
to the current channel."""
wishlist = unserialize(self.registryValue('wishlist', channel))
if list(wishlist) == 0:
irc.error(_('No wish for the moment.'))
return
indexes = range(1, len(wishlist) + 1)
wishlist_with_index = zip(indexes, wishlist)
formatted_wishlist = [_('#%i: %s') % x for x in wishlist_with_index]
irc.reply(utils.str.format('%L', formatted_wishlist))
list = wrap(list, ['channel'])
@internationalizeDocstring
def get(self, irc, msg, args, channel, id):
"""[<channel>] <id>
Tell you the thing number <id>. <channel> is only needed if you
don't send the message on the channel itself."""
wishlist = unserialize(self.registryValue('wishlist', channel))
if len(wishlist) < id:
irc.error(_('No thing has this id.'))
return
irc.reply(_('Wish #%i is %s.') % (id, wishlist[id - 1]))
get = wrap(get, ['channel', 'id'])
@internationalizeDocstring
def random(self, irc, msg, args, channel):
"""[<channel>]
Tell you a random thing. <channel> is only needed if you
don't send the message on the channel itself."""
wishlist = unserialize(self.registryValue('wishlist', channel))
if list(wishlist) == 0:
irc.error(_('No wish for the moment.'))
return
indexes = range(1, len(wishlist) + 1)
wishlist_with_index = zip(indexes, wishlist)
wish = random.sample(wishlist_with_index, 1)[0]
irc.reply(_('Wish #%i is %s.') % wish)
random = wrap(random, ['channel'])
Class = Iwant
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| [
"progval@gmail.com"
] | progval@gmail.com |
4dd67cedb2fcdaea1d23cfffdaee40bd297ed2ca | 43d4b962a83dac734dfb09b8523fdfcfcc6628c1 | /top_cliente/migrations/0011_auto_20190108_1123.py | a99a0b5f821b487044361a1ed6e31198bf0f69e0 | [] | no_license | redcliver/sistemas | 01edd98c2814eee50550010169b2c7594e5256f5 | 1129c9516c57fbf53ce3cf5e0e5feb3835d3e9df | refs/heads/master | 2020-04-07T17:23:04.809752 | 2019-05-02T16:24:18 | 2019-05-02T16:24:18 | 158,567,651 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 679 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-01-08 14:23
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('top_cliente', '0010_auto_20190107_1215'),
]
operations = [
migrations.AddField(
model_name='cliente',
name='responsavel',
field=models.CharField(blank=True, max_length=200, null=True),
),
migrations.AddField(
model_name='cliente_portabilidade',
name='responsavel',
field=models.CharField(blank=True, max_length=200, null=True),
),
]
| [
"igor-peres@hotmail.com"
] | igor-peres@hotmail.com |
eb07d2424a264b5eb6f3ace30dd67c0b523d36d7 | f4f0a38b624badf71628b3d908732b0ac271a429 | /src/py_playground/graphene/sqlalchemy/model.py | d9e37a331cf3c9a31338498cd0f7b93b7922e0b6 | [] | no_license | gy-chen/py_playground | d2e5cbe803aacf12f777d39cc276dc1bb874dd0b | 5b0fbf42f67ac1cae5ee40f2209513d182669180 | refs/heads/master | 2021-01-23T05:19:10.862126 | 2019-04-24T12:45:10 | 2019-04-24T12:45:10 | 86,298,726 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 592 | py | import os
from sqlalchemy import create_engine, Column, Integer, String
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
engine = create_engine(os.getenv('SQLALCHEMY_DATABASE_URI', 'sqlite:///:memory:'), echo=True)
Base = declarative_base(bind=engine)
session = scoped_session(sessionmaker(bind=engine))
Base.query = session.query_property()
class Greeting(Base):
__tablename__ = 'greeting'
id = Column(Integer, primary_key=True)
greeting = Column(String)
name = Column(String)
Base.metadata.create_all()
| [
"gy.chen@gms.nutc.edu.tw"
] | gy.chen@gms.nutc.edu.tw |
90264299de42b7c729ca7e36729da524020f09a6 | 1f1c75f93692453e7fa944106ea7ed5a6f69d53c | /Day2/LoginTest.py | 0b793b239071b06a1ac550ef5553c8cc8a5ace94 | [] | no_license | caotya/selenium7th | 6e286b70f9430872f57e363595261c36ec9cd820 | 9155fa03763d60afc98d8241a4a3f2077375d6f3 | refs/heads/master | 2020-03-21T10:42:46.092006 | 2018-06-24T08:46:06 | 2018-06-24T08:46:06 | 138,466,849 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,907 | py | #1.打开浏览器
from selenium import webdriver
from selenium.webdriver import ActionChains
from selenium.webdriver.common.keys import Keys
class login():
def loginWithDefaultUser(self,driver):
#driver = webdriver.Chrome()
driver.implicitly_wait(20)
#2.打开海盗商城网站
driver.get("http://localhost/")
#3.删除登陆链接的target属性
#在python中字符串可以用单引号,也可以使用双引号
#如果字符串本身包含双引号,两边使用单引号
driver.execute_script('document.getElementsByClassName("site-nav-right fr")[0].childNodes[1].removeAttribute("target")')
#4.点击登陆按钮,跳转到登陆页面
driver.find_element_by_xpath('/html/body/div[1]/div[1]/div[1]/div[2]/a[1]').click()
#5.输入用户名
driver.find_element_by_id("username").send_keys("cty")
#6.输入密码
#ActionChains需要导包,导包快捷键alt+enter
#action是动作行为的意思,chains是链表的意思,链表类似于数组
#所以ActionChains是一组动作和行为的意思
#下面这句话的意思是实例化一个ActionChains这个类的对象
#这个对象可以用来执行一组动作和行为
action=ActionChains(driver)
#所有的actions方法都要以perform()方法结尾才能被执行
action.send_keys(Keys.TAB).send_keys("123456").perform()
#7.点击登陆按钮
action.send_keys(Keys.ENTER).perform()
#加入不支持回车键登陆,我们可以直接点击登陆按钮
#加入也很难定位登陆系统,我还可以用submit()方法
#submit是提交的意思,用于提交表单
#想象一下,用户名和密码等信息是不是同时发送给后端服务端?
#开发通过form表单把这些信息同时发送给后台 | [
"51Testing"
] | 51Testing |
8fab3fb2c510162b5127a40f03644fd6193110e5 | f9f05cf26b8fd4b23a7dc907523cc2c66ab750a0 | /day02/test_008.py | b3db10ad2a2aa0e17664a1a3a3c44310c537725e | [] | no_license | wuxing1314/APIAuto-Test | 59c4a114be3dc93cfba13c0157acd83830807658 | 3c3ab0d019a3f8d41a06ea62a13174eaac94555e | refs/heads/main | 2023-03-13T14:44:24.464393 | 2021-02-24T10:43:45 | 2021-02-24T10:43:45 | 341,841,382 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 973 | py | '''
fixtrue带参数
'''
import pytest
# 5组测试数据,表示不同的用户名
@pytest.fixture(params=["root", "admin", "administrator", "123", "Test-123"])
def login_data(request): # 固定写法,request是pytest中的关键字
return request.param # 固定写法
# 使用5组数据分别执行这个用例,共执行5次。
def test_login(login_data):
print(login_data)
# format,
print(f"测试登录功能,使用用户名:{login_data}登录")
print("测试登录功能,使用用户名:%s登录"%login_data)
@pytest.fixture(params=[{"username":"root", "pwd":"123456"}, {"username":"admin", "pwd":"666666"}])
def login_data2(request): # 固定写法,request是pytest中的关键字
return request.param # 固定写法
def test_login2(login_data2):
print(login_data2) #字典
print(f"======================测试登录功能,使用用户名:{login_data2['username']},密码:{login_data2['pwd']}登录")
| [
"wuxing1314@aliyun.com"
] | wuxing1314@aliyun.com |
d4c32b4b746774ecff46664b0dd4d7aa9138c14e | 33ddc0a7e904b53eacf2170856699ae91fe8aa47 | /decim/immuno_scripts/imshow_stan_valid_onefixed.py | a96fbbf06274086b1a0288bcd68f194ee4ea8ece | [] | no_license | nwilming/decim | 6670dd460ce15b65c433b25b6a983236b92474c0 | e593f171d132afa3bcf4959457c320d0b35f3c77 | refs/heads/master | 2020-03-08T03:24:36.986437 | 2018-12-11T17:03:38 | 2018-12-11T17:03:38 | 106,668,018 | 0 | 0 | null | 2017-10-12T08:48:37 | 2017-10-12T08:48:37 | null | UTF-8 | Python | false | false | 5,324 | py | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from glob import glob
from os.path import join
import decim.statmisc as ds
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
sns.set(style='ticks', font_scale=1, rc={
'axes.labelsize': 20,
'axes.titlesize': 20,
'xtick.labelsize': 16,
'ytick.labelsize': 16,
'legend.fontsize': 5,
'axes.linewidth': 2,
'xtick.major.width': 2,
'ytick.major.width': 2,
'ytick.major.pad': 2.0,
'ytick.minor.pad': 2.0,
'xtick.major.pad': 2.0,
'xtick.minor.pad': 2.0,
'axes.labelpad': 4.0,
})
files = glob(join('/Users/kenohagena/Documents/immuno/da/analyses/Bayes_Glaze_Model_Apr18/lisa_valid_singlefx_bothvary_Apr18/', '*.csv'))
fileframe = []
for file in files:
cond = file[file.find('samples_'):file.find('fix_')][8:]
df = pd.read_csv(file)
H = ds.mode(df.H.values, 100)
if cond == 'gv':
V = ds.mode(df.V.values, 100)
gen_var = np.nan
else:
V = np.nan
gen_var = ds.mode(df.gen_var.values, 100)
true_H = float(file[file.find('H='):file.find('gv=')][2:])
true_V = float(file[file.find('V='):file.find('H=')][2:])
true_gv = float(file[file.find('gv='):file.find('.csv')][3:])
fileframe.append({'H': H, 'V': V, 'gen_var': gen_var,
'true_H': true_H, 'true_V': true_V,
'true_gen_var': true_gv,
'fixed': cond})
df = pd.DataFrame(fileframe)
vf = df.loc[df.fixed == 'gv']
vim = vf.loc[:, ['V', 'true_H', 'true_V']]
vimz = vim
vimz.true_V = (vim.true_V - vim.V.mean()) / vim.V.std()
vimz.V = (vim.V - vim.V.mean()) / vim.V.std()
vimz['value'] = vimz.true_V - vimz.V
# vimz.value = vimz.value.abs()
vimz = vimz.drop('V', axis=1).groupby(['true_H', 'true_V']).mean().\
reset_index(level=['true_H', 'true_V'])
vim = vimz.pivot(index='true_H', columns='true_V', values='value')
hvim = vf.loc[:, ['H', 'true_H', 'true_V']]
hvimz = hvim
hvimz.true_H = (hvim.true_H - hvim.H.mean()) / hvim.H.std()
hvimz.H = (hvim.H - hvim.H.mean()) / hvim.H.std()
hvimz['value'] = hvimz.true_H - hvimz.H
# hvimz.value = hvimz.value.abs()
hvimz = hvimz.drop('H', axis=1).groupby(['true_H', 'true_V']).mean().\
reset_index(level=['true_H', 'true_V'])
hvim = hvimz.pivot(index='true_H', columns='true_V', values='value')
gvf = df.loc[df.fixed == 'v']
gvim = gvf.loc[:, ['gen_var', 'true_H', 'true_gen_var']]
gvimz = gvim
gvimz.true_gen_var = (gvim.true_gen_var - gvim.gen_var.mean()) /\
gvim.gen_var.std()
gvimz.gen_var = (gvim.gen_var - gvim.gen_var.mean()) / gvim.gen_var.std()
gvimz['value'] = gvimz.true_gen_var - gvimz.gen_var
# gvimz.value = gvimz.value.abs()
gvimz = gvimz.drop('gen_var', axis=1).groupby(['true_H', 'true_gen_var']).\
mean().reset_index(level=['true_H', 'true_gen_var'])
gvim = gvimz.pivot(index='true_H', columns='true_gen_var', values='value')
hgvim = gvf.loc[:, ['H', 'true_H', 'true_gen_var']]
hgvimz = hgvim
hgvimz.true_H = (hgvim.true_H - hgvim.H.mean()) / hgvim.H.std()
hgvimz.H = (hgvim.H - hgvim.H.mean()) / hgvim.H.std()
hgvimz['value'] = hgvimz.true_H - hgvimz.H
# hgvimz.value = hgvimz.value.abs()
hgvimz = hgvimz.drop('H', axis=1).groupby(['true_H', 'true_gen_var']).mean().\
reset_index(level=['true_H', 'true_gen_var'])
hgvim = hgvimz.pivot(index='true_H', columns='true_gen_var', values='value')
f, ax = plt.subplots(2, 2, figsize=(10, 12))
vmin = -2
vmax = 2
f.subplots_adjust(left=None, bottom=None, right=None, top=None,
wspace=0, hspace=.2)
im1 = ax[0, 0].imshow(hvim.sort_index(ascending=False),
cmap='BrBG', vmin=vmin, vmax=vmax)
im2 = ax[1, 0].imshow(vim.sort_index(ascending=False),
cmap='BrBG', vmin=vmin, vmax=vmax)
im3 = ax[0, 1].imshow(hgvim.sort_index(ascending=False),
cmap='BrBG', aspect=.7, vmin=vmin, vmax=vmax)
im4 = ax[1, 1].imshow(gvim.sort_index(ascending=False),
cmap='BrBG', aspect=.7, vmin=vmin, vmax=vmax)
ax[0, 0].set(yticks=[0, 5, 10], yticklabels=[0.45, .2, .01],
xticks=[], xticklabels=[],
ylabel='H', title='mean error of fitted H')
ax[1, 0].set(yticks=[0, 5, 10], yticklabels=[0.45, .2, .01],
xticks=[0, 4, 8], xticklabels=[1, 3, 5],
ylabel='H', xlabel='V', title='mean error of fitted V')
ax[0, 1].set(yticks=[], yticklabels=[],
xticks=[], xticklabels=[],
title='mean error of fitted H')
ax[1, 1].set(yticks=[], yticklabels=[],
xticks=[0, 2, 4], xticklabels=[1, 2, 3],
xlabel='gen-var', title='mean error of fitted gen_var')
axins = inset_axes(ax[1, 1],
width="10%",
height="220%",
loc=3,
bbox_to_anchor=(1.2, 0., 1, 1),
bbox_transform=ax[1, 1].transAxes,
borderpad=0
)
plt.colorbar(im1, cax=axins, ticks=[vmin, 0, vmax])
sns.despine(bottom=True, ax=ax[0, 0])
sns.despine(bottom=True, left=True, ax=ax[0, 1])
sns.despine(ax=ax[1, 0])
sns.despine(left=True, ax=ax[1, 1])
f.savefig('imshow_validation_fits.png', dpi=160)
| [
"kenohagena@gmail.com"
] | kenohagena@gmail.com |
b1d1dc32f4381bf42dfc0d9f8c7447b72a0e98fe | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02616/s927139462.py | cdcf6e7467f81108f9611486873420b5b172324c | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,349 | py | MOD = 10 ** 9 + 7
N, K = map(int, input().split())
l = list(map(int, input().split()))
pos = []
neg = []
z = 0
for v in l:
if v > 0:
pos.append(v)
elif v < 0:
neg.append(v)
else:
z += 1
pos.sort()
neg.sort()
if N - z < K:
print(0)
elif N - z == K:
if len(neg) % 2 and z:
print(0)
else:
out = 1
for v in neg + pos:
out *= v
out %= 10 ** 9 + 7
print(out)
elif len(pos) == 0:
if K % 2:
if z:
print(0)
else:
neg.reverse()
out = 1
for i in range(K):
out *= neg[i]
out %= 10 ** 9 + 7
print(out)
else:
out = 1
for i in range(K):
out *= neg[i]
out %= 10 ** 9 + 7
print(out)
else:
out = 1
neg.reverse()
if K % 2:
out = pos.pop()
K -= 1
while K:
if len(pos) >= 2:
nP = pos[-1] * pos[-2]
else:
nP = 0
if len(neg) >= 2:
nN = neg[-1] * neg[-2]
else:
nN = 0
if nP > nN:
pos.pop()
pos.pop()
out *= nP
else:
neg.pop()
neg.pop()
out *= nN
out %= MOD
K -= 2
print(out)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
a8839f781f7fcf40aeace3aebbce531b846de372 | 845e20f1cfa0980244f68643577dd7ca909194b5 | /drf_auth/views.py | e6a2d2c191d8d53b6ec245b11ec3158c124c6230 | [
"MIT"
] | permissive | TheArtling/django-drf-auth | 919735b138572287ba6b7b05cef345679638267e | 732183fae6cb129a1bf5c0c619fbd76e366ab392 | refs/heads/master | 2020-07-25T18:41:09.969468 | 2016-11-29T10:02:23 | 2016-11-29T10:02:23 | 73,772,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | """Views for the drf_auth app."""
from django.views.generic import TemplateView
class FinishSignupView(TemplateView):
template_name = 'drf_auth/finish_signup.html'
| [
"mbrochh@gmail.com"
] | mbrochh@gmail.com |
ae15be2cb00e3b167a3e5ed07674069074de2c2e | f162f030660fa1529cb339fb325b57eceb3e112c | /zinnia/tests/__init__.py | 79bc4626f752f615fa603bdf5ecedfafea63c1fb | [
"BSD-3-Clause"
] | permissive | jbazik/django-blog-zinnia | f1d4b17e36d9b5b27982d7bcdc6c926adf9baac4 | ed18a76887b934eb697be94dc7cf5bd3c866dc87 | refs/heads/master | 2021-01-15T20:38:14.435222 | 2013-06-01T13:28:26 | 2013-06-01T13:28:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,238 | py | """Unit tests for Zinnia"""
from unittest import TestSuite
from unittest import TestLoader
from zinnia.tests.entry import EntryTestCase
from zinnia.tests.entry import EntryHtmlContentTestCase
from zinnia.tests.entry import EntryAbsoluteUrlTestCase
from zinnia.tests.models_bases import LoadModelClassTestCase
from zinnia.tests.signals import SignalsTestCase
from zinnia.tests.category import CategoryTestCase
from zinnia.tests.admin import EntryAdminTestCase
from zinnia.tests.admin import CategoryAdminTestCase
from zinnia.tests.managers import ManagersTestCase
from zinnia.tests.feeds import ZinniaFeedsTestCase
from zinnia.tests.views import ZinniaViewsTestCase
from zinnia.tests.views import ZinniaCustomDetailViews
from zinnia.tests.pingback import PingBackTestCase
from zinnia.tests.metaweblog import MetaWeblogTestCase
from zinnia.tests.comparison import ComparisonTestCase
from zinnia.tests.quick_entry import QuickEntryTestCase
from zinnia.tests.sitemaps import ZinniaSitemapsTestCase
from zinnia.tests.ping import DirectoryPingerTestCase
from zinnia.tests.ping import ExternalUrlsPingerTestCase
from zinnia.tests.templatetags import TemplateTagsTestCase
from zinnia.tests.moderator import EntryCommentModeratorTestCase
from zinnia.tests.spam_checker import SpamCheckerTestCase
from zinnia.tests.url_shortener import URLShortenerTestCase
from zinnia.tests.long_enough import LongEnoughTestCase
from zinnia.tests.mixins import MixinTestCase
from zinnia.tests.author import AuthorTestCase
from zinnia.tests.admin_filters import AuthorListFilterTestCase
from zinnia.tests.admin_filters import CategoryListFilterTestCase
from zinnia.tests.flags import FlagsTestCase
from zinnia.tests.translated_urls import TranslatedURLsTestCase
from zinnia.tests.markups import MarkupsTestCase
from zinnia.tests.markups import FailImportMarkupTestCase
from zinnia.signals import disconnect_entry_signals
from zinnia.signals import disconnect_discussion_signals
def suite():
"""Suite of TestCases for Django"""
suite = TestSuite()
loader = TestLoader()
test_cases = (ManagersTestCase, EntryTestCase,
LoadModelClassTestCase, SignalsTestCase,
EntryHtmlContentTestCase, CategoryTestCase,
ZinniaViewsTestCase, ZinniaFeedsTestCase,
ZinniaSitemapsTestCase, ComparisonTestCase,
DirectoryPingerTestCase, ExternalUrlsPingerTestCase,
TemplateTagsTestCase, QuickEntryTestCase,
URLShortenerTestCase, EntryCommentModeratorTestCase,
ZinniaCustomDetailViews, SpamCheckerTestCase,
EntryAdminTestCase, CategoryAdminTestCase,
MixinTestCase, LongEnoughTestCase,
AuthorTestCase, FlagsTestCase,
AuthorListFilterTestCase, CategoryListFilterTestCase,
TranslatedURLsTestCase, EntryAbsoluteUrlTestCase,
MarkupsTestCase, FailImportMarkupTestCase,
PingBackTestCase, MetaWeblogTestCase)
for test_class in test_cases:
tests = loader.loadTestsFromTestCase(test_class)
suite.addTests(tests)
return suite
disconnect_entry_signals()
disconnect_discussion_signals()
| [
"fantomas42@gmail.com"
] | fantomas42@gmail.com |
e7d036db641c2fbed1c78e31507a4b3f732631ed | 6057b6a85760902950479fe8fdbd06ad4d72f0bb | /Modules/BotCore/Nick_Commands/BotNick_Gitpull.py | 3b6237aa4cc942055e26ecb9cd4d37ad6261b53b | [] | no_license | SpiceBot/SpiceBot.old | c4d1672c5552dd4fa43988b0288e29d1652ca399 | a8dbfe4406d84dcef0a0449fc0f5ee4c4a880166 | refs/heads/master | 2020-05-18T13:12:06.067617 | 2019-06-17T16:13:51 | 2019-06-17T16:13:51 | 184,431,450 | 0 | 1 | null | 2019-06-17T16:13:52 | 2019-05-01T14:48:44 | Python | UTF-8 | Python | false | false | 1,581 | py | #!/usr/bin/env python
# coding=utf-8
from __future__ import unicode_literals, absolute_import, print_function, division
# sopel imports
import sopel.module
# imports for system and OS access, directories
import os
import sys
# imports based on THIS file
moduledir = os.path.dirname(__file__)
shareddir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
sys.path.append(shareddir)
from BotShared import *
comdict = {
"author": "deathbybandaid",
"contributors": [],
"description": "",
'privs': ['admin', 'OP'],
"example": "",
"exampleresponse": "",
}
"""
This will pull a directory from github
"""
@module.nickname_commands('gitpull')
@module.thread(True)
def bot_command_hub(bot, trigger):
botcom = botcom_nick(bot, trigger)
# Bots block
if bot_check_inlist(bot, botcom.instigator, [bot.nick]):
return
# does not apply to bots
if "altbots" in bot.memory:
if bot_check_inlist(bot, botcom.instigator, bot.memory["altbots"].keys()):
return
if not bot_permissions_check(bot, botcom):
return osd(bot, botcom.instigator, 'notice', "I was unable to process this Bot Nick command due to privilege issues.")
botcom.directory = get_nick_value(bot, botcom.instigator, 'temp', 'unsorted', 'current_admin_dir') or bot.memory["botdict"]["tempvals"]["bot_info"][str(bot.nick)]["directory_main"]
osd(bot, botcom.channel_current, 'say', "attempting to git pull " + botcom.directory)
gitpull(bot, botcom.directory)
| [
"sam@deathbybandaid.net"
] | sam@deathbybandaid.net |
16b30666c8848aad3076537200b3ee44a4225c37 | 31df36bde8d00fbafdada3e40705036aa7b136f0 | /todo_app/migrations/0001_initial.py | b3924a8344f1829e79c8aa8bccf06f1fcd5617b2 | [] | no_license | IMsumitkumar/TO-DO-Django-rest-framework- | 9d1a562e20850d8a9b87885a7d8c9ed66fe05fc9 | 3d6a9019bc4b317d9bdaffe2d05251f1845b3d36 | refs/heads/master | 2022-12-05T06:37:07.632915 | 2020-08-26T20:14:58 | 2020-08-26T20:14:58 | 290,594,768 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | # Generated by Django 3.0.3 on 2020-08-05 07:17
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Task',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('completed', models.BooleanField(blank=True, default=False, null=True)),
],
),
]
| [
"sk20506942@gmail.com"
] | sk20506942@gmail.com |
17c090bdb8836170c137db5dfbe4f020d57883d9 | 0fc1ba5ecb2d8eac5533890c0c1df2034e42eeff | /s14/clase/edmonds-karp.py | b1488cae23f42645e0cf35d0c4bdcec6ba577986 | [] | no_license | oscarburga/tutorias-complejidad-algoritmica-2021-1 | 00c8d017ed63d10378b7ba543096564082ef3d3c | c2ad9c27e082de442e739f61264f19160ade9c07 | refs/heads/master | 2023-06-12T07:16:26.528929 | 2021-07-04T21:00:03 | 2021-07-04T21:00:03 | 354,336,135 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,928 | py | #!/usr/bin/python3.7
import resource, sys
resource.setrlimit(resource.RLIMIT_STACK, (2**29, -1))
sys.setrecursionlimit(10**7)
inf = 10**18
# math.inf es de tipo float
# aqui estamos usando puros enteros,
# entonces mejor trabajemos al 100% con enteros
# buena practica: evitar convertir tipos de datos constantemente
n, m = map(int, input().split())
adj = [[] for _ in range(n)]
# Matriz de capacidades:
# cap[u][v]: capacidad restante en la arista de u->v
cap = [[0]*n for _ in range(n)]
for _ in range(m):
x, y, w = map(int, input().split())
x -= 1
y -= 1
cap[x][y] += w
adj[x].append(y)
adj[y].append(x)
# ford-fulkerson: utilizamos DFS para hallar los caminos aumentantes
# recordar que un camino aumentante es simplemente un camino en el grafo
# residual que permite enviar un flujo positivo de la fuente al sumidero
# en este caso, la fuente es el vertice 0 y el sumidero es el vertice N-1
from collections import deque
def bfs(s, t): #vertice, flujo que esta llegando al vertice
p = [-1] * n # arreglo de padres
p[s] = -2
q = deque()
q.append((s, inf))
while len(q):
v, f = q.popleft()
if v == t:
return f, p
for e in adj[v]:
if p[e] == -1 and cap[v][e]:
p[e] = v
new_flow = min(f, cap[v][e])
q.append((e, new_flow))
return 0, p
# Loop principal - ford fulkerson
flujo_total = 0
# mientras existe camino aumentante
# enviar flujo por ese camino aumentante
while True:
vis = [False] * n
sent_flow, p = bfs(0, n-1)
if sent_flow == 0: break
flujo_total += sent_flow
# a diferencia del ford fulkerson
# ahora necesitamos actualizar manualmente
# las capacidades del grafo residual luego del BFS
v = n-1
while v != 0:
u = p[v]
cap[u][v] -= sent_flow
cap[v][u] += sent_flow
v = u
print(flujo_total)
| [
"oscarburga2001@gmail.com"
] | oscarburga2001@gmail.com |
6bbc05fd9ff6b3fea46450045bac001afee57322 | ae67b9d90db114c1e15ce63ee0d27942d999a83b | /ask-smapi-model/ask_smapi_model/v1/skill/nlu/evaluations/resolutions_per_authority_status.py | 4d246d84d31991604e03f4f5736d0a140fb51da4 | [
"Apache-2.0"
] | permissive | Birds-Awesome-Org/alexa-apis-for-python | ecb2e351b5cb1b341dda5c3ebc38927fa6d66a93 | d22c1712cb53a442b72f830f53d97ef66075750b | refs/heads/master | 2022-12-30T04:37:51.214040 | 2020-10-09T21:41:03 | 2020-10-09T21:41:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,659 | py | # coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union, Any
from datetime import datetime
from ask_smapi_model.v1.skill.nlu.evaluations.resolutions_per_authority_status_code import ResolutionsPerAuthorityStatusCode as Evaluations_ResolutionsPerAuthorityStatusCodeV1
class ResolutionsPerAuthorityStatus(object):
"""
:param code: A code indicating the results of attempting to resolve the user utterance against the defined slot types. This can be one of the following: ER_SUCCESS_MATCH: The spoken value matched a value or synonym explicitly defined in your custom slot type. ER_SUCCESS_NO_MATCH: The spoken value did not match any values or synonyms explicitly defined in your custom slot type. ER_ERROR_TIMEOUT: An error occurred due to a timeout. ER_ERROR_EXCEPTION: An error occurred due to an exception during processing.
:type code: (optional) ask_smapi_model.v1.skill.nlu.evaluations.resolutions_per_authority_status_code.ResolutionsPerAuthorityStatusCode
"""
deserialized_types = {
'code': 'ask_smapi_model.v1.skill.nlu.evaluations.resolutions_per_authority_status_code.ResolutionsPerAuthorityStatusCode'
} # type: Dict
attribute_map = {
'code': 'code'
} # type: Dict
supports_multiple_types = False
def __init__(self, code=None):
# type: (Optional[Evaluations_ResolutionsPerAuthorityStatusCodeV1]) -> None
"""
:param code: A code indicating the results of attempting to resolve the user utterance against the defined slot types. This can be one of the following: ER_SUCCESS_MATCH: The spoken value matched a value or synonym explicitly defined in your custom slot type. ER_SUCCESS_NO_MATCH: The spoken value did not match any values or synonyms explicitly defined in your custom slot type. ER_ERROR_TIMEOUT: An error occurred due to a timeout. ER_ERROR_EXCEPTION: An error occurred due to an exception during processing.
:type code: (optional) ask_smapi_model.v1.skill.nlu.evaluations.resolutions_per_authority_status_code.ResolutionsPerAuthorityStatusCode
"""
self.__discriminator_value = None # type: str
self.code = code
def to_dict(self):
# type: () -> Dict[str, object]
"""Returns the model properties as a dict"""
result = {} # type: Dict
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
# type: () -> str
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
# type: () -> str
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
# type: (object) -> bool
"""Returns true if both objects are equal"""
if not isinstance(other, ResolutionsPerAuthorityStatus):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
# type: (object) -> bool
"""Returns true if both objects are not equal"""
return not self == other
| [
"ask-pyth@dev-dsk-ask-sdk-python-2b-85d79f62.us-west-2.amazon.com"
] | ask-pyth@dev-dsk-ask-sdk-python-2b-85d79f62.us-west-2.amazon.com |
9c0d8de69c2968fa840abcc6e154859bb8c79a48 | 6712885a4c2a056eee3b4488382b9afc2149f799 | /New LeetCode Problems/flood_fill.py | aed8336f938eaf04101b4de54e4a8e3705e75959 | [] | no_license | matthewharrilal/CS-Questions-GRIND | cac1cb562e5dad79ee4e224895d034f9c71d9ed3 | 7196c5e8df495d43ee91f218d6253c8a88a7d59d | refs/heads/master | 2020-04-16T01:30:53.289837 | 2019-07-24T04:34:21 | 2019-07-24T04:34:21 | 165,176,585 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,693 | py | class Solution(object):
def floodFill(self, image, sr, sc, newColor):
"""
:type image: List[List[int]]
:type sr: int
:type sc: int
:type newColor: int
:rtype: List[List[int]]
"""
initial_value = image[sr][sc]
image = self.dfs(image, sr, sc, newColor, initial_value)
return image
def dfs(self, image, row, column, newColor, initial_value, visited=None):
if visited is None:
visited = set()
if (row,column) in visited:
return image
# You only use a for loop for depth first search if you want to explore different paths from a given starting point
if (0 <= row < len(image)) and (0 <= column < len(image[0])) and image[row][column] == initial_value and (row, column) not in visited:
visited.add((row, column))
image[row][column] = newColor
image = self.dfs(image, row + 1, column, newColor, initial_value, visited)
image = self.dfs(image, row - 1, column, newColor, initial_value, visited)
image = self.dfs(image, row, column + 1, newColor, initial_value, visited)
image = self.dfs(image, row, column - 1, newColor, initial_value, visited)
# # For each of the row and columns for neighbors
# for neighbor_row, neighbor_column in [(row + 1, column), (row - 1, column), (row, column + 1), (row, column - 1) ]:
# print(row, column)
# # First check if they are in bounds ... if they are in bounds then you check for color
# if (0 <= neighbor_row < len(image)) and (0 <= neighbor_column < len(image[0])) and (row, column) not in visited:
# # If the current neighbor has the same color as the current pixel update
# if image[neighbor_row][neighbor_column] == initial_value:
# # First update the neighbor's color
# # image[neighbor_row][neighbor_column] = newColor
# image[row][column] = newColor
# visited.add((row,column))
# self.dfs(image, neighbor_row, neighbor_column, newColor, initial_value, visited)
# # After it's neighbors have been resolved change the centerpiece's color
return image | [
"matthewharrilal@gmail.com"
] | matthewharrilal@gmail.com |
d7009b78267a75e0972f520f5271a6afec7e9767 | 936dc2666f27de7a7d1428c7ad2ded62a722b8fa | /src/projects/migrations/0017_alter_project_contract_link.py | bfd01d72a5c29ef1378ad353f67404924b02d0de | [
"ISC"
] | permissive | MTES-MCT/aides-territoires | 03451a32bdeaab3812b8593bfe3a27c1b1d9a182 | af9f6e6e8b1918363793fbf291f3518ef1454169 | refs/heads/master | 2023-09-04T22:15:17.819264 | 2023-08-25T13:19:17 | 2023-08-25T13:19:17 | 124,301,398 | 21 | 11 | NOASSERTION | 2023-09-12T13:46:49 | 2018-03-07T22:19:11 | Python | UTF-8 | Python | false | false | 1,046 | py | # Generated by Django 4.2.2 on 2023-06-20 12:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("projects", "0016_alter_project_due_date_and_more"),
]
operations = [
migrations.AlterField(
model_name="project",
name="contract_link",
field=models.CharField(
blank=True,
choices=[
("ACV1", "Action Coeur de Ville 1"),
("ACV2", "Action Coeur de Ville 2"),
(
"AMI",
"AMI pour l’égalité entre les femmes et les hommes dans les territoires ruraux",
),
("CRTE", "CRTE"),
("PCAET", "PCAET"),
("PVD", "Petites Villes de Demain"),
],
max_length=10,
null=True,
verbose_name="Appartenance à un plan/programme/contrat",
),
),
]
| [
"noreply@github.com"
] | MTES-MCT.noreply@github.com |
0724ad5f522ef9ddb6cd0a6db10f6a57e98dca56 | beae392dcd51001613e5d3e226587d646d854c1f | /ML_Finance/OptimizeAndFitLine.py | f4ee7558d5eeae3765e5b442004a34c6dc14846f | [] | no_license | ShubraChowdhury/ML | 2c7a0a51ca657dda47ceb8a19cecbcafa6cd16b0 | 1e631f2ab86bfd35e2c8790501a7effb4e0d106a | refs/heads/master | 2021-05-15T14:44:28.753164 | 2018-11-18T14:06:48 | 2018-11-18T14:06:48 | 106,418,011 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,496 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 28 08:49:38 2016
@author: DevAdmin
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize as op
def error(line, data):
""" Compute error between a given line model and observed data
Parameters
line : tuple/list/array (C0,C1) where C0 is the slope of the line
and C1 is the intercept of the line
data : 2D array where each row is a point (x,y)
Return: error as a single real value
"""
""" If we sum up the squares of the residuals of all the points
from the line we get a measure of the
fitness of the line. Our aim should be to minimize this value.
equation is y =mx + c so the error or residual
so the residual = y - (mx=c) , sum of error = (y -(mx+c))^2
data[:,1] = y value , data[:,0] = x value
"""
err = np.sum((data[:,1] - (line[0]*data[:,0]+line[1]))**2)
return err
def fit_line(data, error_func):
""" Fit the line to a given data , using supplied error function
Parameters
data : 2D array where each row is a point (x0,y)
error_func: function that computes the error between
a line and observed data
Returns: the line that minimizes the error function
"""
""" Generate initial guess for this model """
l = np.float32([0,np.mean(data[:,1])])
x_ends = np.float32([-5,5])
plt.plot(x_ends,l[0]*x_ends + l[1],'m--',linewidth=2, label="Initial Guess")
result = op.minimize(error_func,l,args=(data,),method='SLSQP',options={'disp':True})
return result.x
def test_run():
""" Define Original Line """
l_orig = np.float32([4,2])
print("Original Line: C0={}, C1={}".format(l_orig[0],l_orig[1]))
Xorig = np.linspace(0,10,21)
Yorig = l_orig[0]*Xorig + l_orig[1]
plt.plot(Xorig,Yorig,'b',linewidth=2, label ="Original Line")
""" Introduce Noice """
noise_sigma = 3.0
noise = np.random.normal(0, noise_sigma,Yorig.shape )
data = np.asarray([Xorig,Yorig+noise]).T
plt.plot(data[:,0],data[:,1],'go',label="Data points")
""" Try to fit the line to this data """
l_fit = fit_line(data , error)
print("Fitted Line: C0={}, C1={}".format(l_fit[0],l_fit[1]) )
plt.plot(data[:,0],l_fit[0]*data[:,0]+l_fit[1],'r--',label="Fitted Line")
#
if __name__ =="__main__":
test_run()
| [
"noreply@github.com"
] | ShubraChowdhury.noreply@github.com |
80e801fbafd132a8341b829abd5e3072fded48bd | 64460c308cd18945e93e8fb014a35855b734e05f | /src/tasks/waterbird.py | 09bcedc2b4140429779ee92b10a1fcc372bd2dc9 | [] | no_license | davidrmh/P-DRO | b3e6e304b2de7ce05fe387acd37bf2b4f3ae8e7c | fba797fdd833f87a737c13ef707e71541cc70911 | refs/heads/main | 2023-03-23T22:33:16.926562 | 2021-03-22T21:28:42 | 2021-03-22T21:28:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,632 | py | #!/usr/bin/env python3
import os.path
from ..data.waterbird import WaterbirdDataset
from .task import Task
class Waterbird(Task):
"""Split CIFAR into separate classes"""
def __init__(self, path, img_size=84, in_memory=True):
super(Waterbird, self).__init__()
self.path = os.path.join(path, "waterbird_complete95_forest2water2")
self.res = (img_size, img_size)
self.in_memory = in_memory
self._name = "Waterbird"
self._load_data()
def _load_data(self):
# Load MNIST train data
cache_prefix = os.path.join(self.path, "cached_")
self._train_data = WaterbirdDataset.from_folder(
self.path,
"train",
self.res,
cache_prefix=cache_prefix,
in_memory=self.in_memory,
)
self._valid_data = WaterbirdDataset.from_folder(
self.path,
"valid",
self.res,
cache_prefix=cache_prefix,
in_memory=self.in_memory,
)
self._test_data = WaterbirdDataset.from_folder(
self.path,
"test",
self.res,
cache_prefix=cache_prefix,
in_memory=self.in_memory,
)
print(
len(self._train_data),
len(self._valid_data),
len(self._test_data)
)
@property
def n_classes(self):
return 2
@property
def input_size(self):
return (3, self.img_size, self.img_size)
def collate_fn(self, *args):
"""Collater to make batches"""
return self.train_data.collate_fn(*args)
| [
"pmichel31415@gmail.com"
] | pmichel31415@gmail.com |
edac5ea3b0682e65fd05dae85284ef059fd107b7 | 3e660e22783e62f19e9b41d28e843158df5bd6ef | /script.me.syncsmashingfromgithub/smashingfavourites/scripts/checked/ideaordemo/printtologwaskodilog.py | 6823b7c81ef3f5b871a6ef28dbcfc27b4316bbde | [] | no_license | monthou66/repository.smashingfavourites | a9603906236000d2424d2283b50130c7a6103966 | f712e2e4715a286ff6bff304ca30bf3ddfaa112f | refs/heads/master | 2020-04-09T12:14:34.470077 | 2018-12-04T10:56:45 | 2018-12-04T10:56:45 | 160,341,386 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | #!/usr/bin/python
print "testing********************************************************************************"
mess = sys.argv[1]
def message():
if mess == 1:
message = "one"
elif mess == 2:
message = "two"
else:
message = "balls"
print message()
print "testing********************************************************************************"
exit() | [
"davemullane@gmail.com"
] | davemullane@gmail.com |
4936f83f132b296c9d8ead10fe8022a6728e0d3b | b92adbd59161b701be466b3dbeab34e2b2aaf488 | /testing/testing_submissions/testing_assignment_003/venv/bin/pylint | e4b4c059f8234121a722c73fe47bbf82ae1cb450 | [] | no_license | R151865/cloud_9_files | 7486fede7af4db4572f1b8033990a0f07f8749e8 | a468c44e9aee4a37dea3c8c9188c6c06e91cc0c4 | refs/heads/master | 2022-11-22T10:45:39.439033 | 2020-07-23T09:31:52 | 2020-07-23T09:31:52 | 281,904,416 | 0 | 1 | null | 2022-11-20T00:47:10 | 2020-07-23T09:08:48 | Python | UTF-8 | Python | false | false | 244 | #!/home/ec2-user/environment/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_pylint
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(run_pylint())
| [
"r151865@rguktrkv.ac.in"
] | r151865@rguktrkv.ac.in | |
c2f8a182a164740adceb99c8dd13c0e3eed81592 | 385c523c08eea5e95d323de5c9be45ddbc7f51f9 | /notebook/moscatel/models.py | 2437e62fa7fa2ed2cba90b695dc8567f146e336f | [] | no_license | jpdeleon/moscatel | d55f393c780600c8b5477a056c232578d333a699 | 29b1ce813cf649c8e4aaabc22aa71d186731fee0 | refs/heads/master | 2021-01-22T23:10:51.028885 | 2017-07-12T03:19:51 | 2017-07-12T03:19:51 | 85,614,391 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,617 | py | import warnings
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling import models, fitting
from scipy.optimize import curve_fit
from scipy.ndimage.filters import gaussian_filter
from moscatel import utils
def gauss1D(x, *params):
A, mu, sigma, eps= params
return A*np.exp(-(x-mu)**2/(2.*sigma**2)) + eps
def model_gaussian(image_crop,convolve=False,verbose=False, show_fit=False, return_fwhm=False):
#normalize image
if convolve==True:
sigma_estimate = 4
image_crop = gaussian_filter(image_crop,sigma=sigma_estimate)
image_crop /= np.max(image_crop)
# https://python4astronomers.github.io/fitting/sherpa.html
i,j = np.unravel_index(image_crop.argmax(), image_crop.shape) #take x,y max
peak_x=image_crop[i,:]
peak_y=image_crop[:,j]
#estimate mean and standard deviation
ydata = (peak_x+peak_y)/2.0
#ydata /= np.max(ydata)
xdata = np.array(range(len(ydata)))
xmean = len(xdata)/2.0
sigma = np.std(ydata)
amp = np.max(ydata)
eps = np.median(ydata)
#import pdb;pdb.set_trace()
#fitting
popt, pcov = curve_fit(gauss1D, xdata, ydata, p0 = [amp, xmean, sigma, eps])
if show_fit == True:
plt.plot(xdata,gauss1D(xdata, *popt), label='Gaussian fit')
plt.plot(xdata,ydata,'o',label='data',alpha=0.5)
plt.legend()
if verbose==True:
print('A: {}\nmu: {}\nsigma= {}\neps: {}'.format(popt[0],popt[1], popt[2], popt[3]))
if return_fwhm==True:
fwhm=utils.sigma_to_fwhm(popt[2])
return popt, fwhm
return popt
def model_gaussian2D(img_crop, verbose=False, fwhm=8., return_fwhm=False):
sigma= utils.fwhm_to_sigma(fwhm)
try:
#get 1D fit results
result_1Dfit = model_gaussian(img_crop)
amp, mu, sigma, eps = result_1Dfit
#initialize model
g_init = models.Gaussian2D(amplitude=amp,x_mean=mu, y_mean=mu, x_stddev=sigma, y_stddev=sigma)
except:
sigma= utils.fwhm_to_sigma(fwhm)
#if 1D fitting fails due to wrong initial centroiding
x_mean, y_mean = img_crop.shape[0]/2, img_crop.shape[1]/2
#initialize model using default values
g_init = models.Gaussian2D(amplitude=1,x_mean=x_mean, y_mean=y_mean, x_stddev=sigma, y_stddev=sigma)
#fit model
fit_g = fitting.LevMarLSQFitter()
#normalize image before fitting
img_crop_norm =img_crop/np.max(img_crop)
x,y=range(img_crop.shape[0]),range(img_crop.shape[1])
with warnings.catch_warnings():
# Ignore model linearity warning from the fitter
warnings.simplefilter('ignore')
g = fit_g(g_init, x, y, img_crop_norm[y,x])
if verbose==True:
print(g.param_names,g.param_sets)
if return_fwhm==True:
fwhm_mean = utils.sigma_to_fwhm(np.abs((g.x_stddev.value+g.y_stddev.value)/2))
return g, fwhm_mean
return g
def get_fwhm(img_crop, convolve=False, verbose=False, show_fit=False, method='1D'):
try:
fit_result1D = model_gaussian(img_crop,convolve=convolve, verbose=verbose, show_fit=show_fit);
except:
warnings.warn('no good 1D gaussian fit')
if method=='2D':
try:
fit_result2D = model_gaussian2D(img_crop,convolve=convolve, verbose=verbose, show_fit=show_fit);
except:
warnings.warn('no good 2D gaussian fit')
g = model_gaussian2D(img_crop)
sigma_mean = np.abs((g.x_stddev.value+g.y_stddev.value)/2)
fwhm_mean = utils.sigma_to_fwhm(sigma_mean)
return fwhm_mean
else: #1D
return utils.sigma_to_fwhm(fit_result1D[2])
| [
"jpdeleon.bsap@gmail.com"
] | jpdeleon.bsap@gmail.com |
bb1957eea0d4ed3918f498288f4568ea16b8cc32 | f0fd2b4f56b1753e47139a3557a1625abcfead9e | /django/django_fundamentals/books_authors_shell copy/read/migrations/0002_author_notes.py | ba6f997fbecc30dbebbe77a7829bb0fd53a537f3 | [] | no_license | nlee1229/Python-Stack | 16dd6078be98392d8a21a93965beb7d39ba4157e | 1aba5cf17f1f6c50d8fd50de031fcd6ec2bdda21 | refs/heads/master | 2023-03-26T05:12:14.264780 | 2021-03-22T01:56:22 | 2021-03-22T01:56:22 | 328,876,685 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | # Generated by Django 2.2.4 on 2020-11-06 01:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('read', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='author',
name='notes',
field=models.TextField(default=''),
preserve_default=False,
),
]
| [
"72540269+nlee1229@users.noreply.github.com"
] | 72540269+nlee1229@users.noreply.github.com |
af877db2a3557b2f2f8cc28b36a87cd4436541ae | 13c14be20f16ffc14b7cde71ed8c4179e2410a0b | /python/itertools/itertools-product.py | bc998829e6f43581fbdbeec69ff4e231a9c533e2 | [] | no_license | gautamits/hackerrank | 79688e5735a27eed032ce0c34f4fe253cfb6b572 | aee6b00f4cd39c18e9107e933cceb55b9677c3c7 | refs/heads/master | 2020-05-21T22:55:50.977437 | 2018-12-11T05:09:36 | 2018-12-11T05:09:36 | 61,579,845 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | from itertools import product
a=[]
a.append(map(int,raw_input().split()))
a.append(map(int,raw_input().split()))
print " ".join(map(str,list(product(*a)))) | [
"gautamamits95@gmail.com"
] | gautamamits95@gmail.com |
023b8fcd6d5eb2c84a834045fcaab3fad74bf426 | ee974d693ca4c4156121f8cb385328b52eaac07c | /env/lib/python3.6/site-packages/werkzeug/_reloader.py | b8affbfaf3f213035d1af24083e04d179718cf1a | [] | no_license | ngonhi/Attendance_Check_System_with_Face_Recognition | f4531cc4dee565d0e45c02217f73f3eda412b414 | 92ff88cbc0c740ad48e149033efd38137c9be88d | refs/heads/main | 2023-03-12T07:03:25.302649 | 2021-02-26T15:37:33 | 2021-02-26T15:37:33 | 341,493,686 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:cb9230666c805b39a95ad7d873c66804bfb5ce66cabb2ba57948e0073b851041
size 11575
| [
"Nqk180998!"
] | Nqk180998! |
1f1f32e09e1aedb0e123abef9b9abff5affa06e3 | 303bac96502e5b1666c05afd6c2e85cf33f19d8c | /solutions/python3/644.py | bd0062c1612ae51de4128e84257e4bb08e7798cd | [
"MIT"
] | permissive | jxhangithub/leetcode | 5e82f4aeee1bf201e93e889e5c4ded2fcda90437 | 0de1af607557d95856f0e4c2a12a56c8c57d731d | refs/heads/master | 2022-05-22T12:57:54.251281 | 2022-03-09T22:36:20 | 2022-03-09T22:36:20 | 370,508,127 | 1 | 0 | MIT | 2022-03-09T22:36:20 | 2021-05-24T23:16:10 | null | UTF-8 | Python | false | false | 691 | py | class Solution:
def findMaxAverage(self, nums: List[int], k: int) -> float:
def sub(mid):
sm = pre = mn = 0
for i in range(k):
sm += nums[i] - mid
if sm >= 0:
return True
for i in range(k, len(nums)):
sm += nums[i] - mid
pre += nums[i - k] - mid
mn = min(mn, pre)
if sm >= mn:
return True
return False
l, r = min(nums), max(nums)
while l + 1E-6 < r:
mid = (l + r) / 2
if sub(mid):
l = mid
else:
r = mid
return l | [
"cenkay.arapsagolu@gmail.com"
] | cenkay.arapsagolu@gmail.com |
4efaa604ba95e701f0daf84a32fa872c854e2d5d | 84d0c498d10958e763e303a80bfec1c2ff0da1f9 | /raven_django/management/__init__.py | e011a71d8e72588431c0d8fe1a373d8c128e6ccc | [
"BSD-3-Clause"
] | permissive | nikolas/raven-django | 08e54b4dda8bc53c621659b8d3a2b71759c0b7cc | f9065d60b579046ace266741a2f1e0e1d55c4f38 | refs/heads/master | 2020-05-23T11:18:38.516078 | 2014-07-26T09:35:19 | 2014-07-26T09:35:19 | 34,874,324 | 0 | 0 | null | 2015-04-30T20:10:23 | 2015-04-30T20:10:23 | null | UTF-8 | Python | false | false | 1,589 | py | """
raven_django.raven.management
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2013 by the Sentry Team, see AUTHORS for more details
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import sys
from functools import wraps
from django.conf import settings
def patch_base_command(cls):
"""
Patches ``cls.execute``, returning a boolean describing if the
attempt was successful.
"""
try:
original_func = cls.execute
except AttributeError:
# must not be a capable version of Django
return False
if hasattr(original_func, '__raven_patched'):
return False
def can_capture(cls):
return 'sentry' not in settings.INSTALLED_APPS
@wraps(original_func)
def new_execute(self, *args, **kwargs):
try:
return original_func(self, *args, **kwargs)
except Exception:
if can_capture(type(self)):
from raven_django.models import client
client.captureException(extra={
'argv': sys.argv
})
raise
new_execute.__raven_patched = True
cls.execute = new_execute
return True
if ('raven_django' in settings.INSTALLED_APPS
or 'raven.contrib.django' in settings.INSTALLED_APPS
or 'raven.contrib.django.raven_compat' in settings.INSTALLED_APPS):
try:
from django.core.management.base import BaseCommand
except ImportError:
# give up
pass
else:
patch_base_command(BaseCommand)
| [
"xordoquy@linovia.com"
] | xordoquy@linovia.com |
3db8e3a44e9b6f7ee7f5f3ee716ce2af413c1025 | 9405aa570ede31a9b11ce07c0da69a2c73ab0570 | /aliyun-python-sdk-adb/aliyunsdkadb/request/v20190315/DescribeAutoRenewAttributeRequest.py | 5520ea659e7e68fa89a55c79e277c964b88dae9f | [
"Apache-2.0"
] | permissive | liumihust/aliyun-openapi-python-sdk | 7fa3f5b7ea5177a9dbffc99e73cf9f00e640b72b | c7b5dd4befae4b9c59181654289f9272531207ef | refs/heads/master | 2020-09-25T12:10:14.245354 | 2019-12-04T14:43:27 | 2019-12-04T14:43:27 | 226,002,339 | 1 | 0 | NOASSERTION | 2019-12-05T02:50:35 | 2019-12-05T02:50:34 | null | UTF-8 | Python | false | false | 2,551 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkadb.endpoint import endpoint_data
class DescribeAutoRenewAttributeRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'adb', '2019-03-15', 'DescribeAutoRenewAttribute','ads')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_PageNumber(self):
return self.get_query_params().get('PageNumber')
def set_PageNumber(self,PageNumber):
self.add_query_param('PageNumber',PageNumber)
	def get_PageSize(self):
		# Read the 'PageSize' query parameter (None when unset).
		return self.get_query_params().get('PageSize')
	def set_PageSize(self,PageSize):
		# Set the 'PageSize' query parameter.
		self.add_query_param('PageSize',PageSize)
	def get_ResourceOwnerAccount(self):
		# Read the 'ResourceOwnerAccount' query parameter (None when unset).
		return self.get_query_params().get('ResourceOwnerAccount')
	def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
		# Set the 'ResourceOwnerAccount' query parameter.
		self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
	def get_OwnerAccount(self):
		# Read the 'OwnerAccount' query parameter (None when unset).
		return self.get_query_params().get('OwnerAccount')
	def set_OwnerAccount(self,OwnerAccount):
		# Set the 'OwnerAccount' query parameter.
		self.add_query_param('OwnerAccount',OwnerAccount)
	def get_OwnerId(self):
		# Read the 'OwnerId' query parameter (None when unset).
		return self.get_query_params().get('OwnerId')
	def set_OwnerId(self,OwnerId):
		# Set the 'OwnerId' query parameter.
		self.add_query_param('OwnerId',OwnerId)
	def get_DBClusterIds(self):
		# Read the 'DBClusterIds' query parameter (None when unset).
		return self.get_query_params().get('DBClusterIds')
def set_DBClusterIds(self,DBClusterIds):
self.add_query_param('DBClusterIds',DBClusterIds) | [
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
14e3153830d0ff69b185461c524aab1119d22619 | 63ba933a294865f65409635f62e0f1d59f725f37 | /src/arrays/binPack.py | 2e77ffde61fd1c0e826333caf4fad122a0c6913a | [
"CC0-1.0"
] | permissive | way2arun/datastructures_algorithms | fc4302bdbb923ef8912a4acf75a286f2b695de2a | 4ea4c1579c28308455be4dfa02bd45ebd88b2d0a | refs/heads/master | 2021-12-07T04:34:35.732026 | 2021-09-30T12:11:32 | 2021-09-30T12:11:32 | 203,658,808 | 1 | 0 | null | 2020-08-08T15:55:09 | 2019-08-21T20:23:46 | Python | UTF-8 | Python | false | false | 2,947 | py | """
Bin Packing Problem (Minimize number of used Bins)
Given n items of different weights and bins each of capacity c,
assign each item to a bin such that number of total used bins is minimized.
It may be assumed that all items have weights smaller than bin capacity.
Input: weight[] = {4, 8, 1, 4, 2, 1}
Bin Capacity c = 10
Output: 2
We need minimum 2 bins to accommodate all items
First bin contains {4, 4, 2} and second bin {8, 1, 1}
Input: weight[] = {9, 8, 2, 2, 5, 4}
Bin Capacity c = 10
Output: 4
We need minimum 4 bins to accommodate all items.
Input: weight[] = {2, 5, 4, 7, 1, 3, 8};
Bin Capacity c = 10
Output: 3
Lower Bound
We can always find a lower bound on minimum number of bins required. The lower bound can be given as :
Min no. of bins >= Ceil ((Total Weight) / (Bin Capacity))
In the above examples, lower bound for first example is “ceil(4 + 8 + 1 + 4 + 2 + 1)/10” = 2 and lower bound in second example is “ceil(9 + 8 + 2 + 2 + 5 + 4)/10” = 3.
This problem is a NP Hard problem and finding an exact minimum number of bins takes exponential time. Following are approximate algorithms for this problem.
Applications
Loading of containers like trucks.
Placing data on multiple disks.
Job scheduling.
Packing advertisements in fixed length radio/TV station breaks.
Storing a large collection of music onto tapes/CD’s, etc.
"""
class Solution:
    """Approximate bin-packing heuristics; every bin has capacity ``c``."""

    def nextfit(self, weight, c):
        """Next Fit: keep a single open bin, open a new one when an item
        does not fit.

        Args:
            weight: list of item weights (each assumed <= c).
            c: capacity of every bin.

        Returns:
            Total number of bins used (0 for an empty item list).

        Bug fix: the original started the count at 0 and therefore
        reported one bin too few for every non-empty input (e.g. 4
        instead of 5 for [2, 5, 4, 7, 1, 3, 8] with c=10).
        """
        if not weight:
            return 0
        bins_used = 1  # the first bin is open as soon as there is an item
        remaining = c
        for w in weight:
            if remaining >= w:
                remaining -= w
            else:
                bins_used += 1
                remaining = c - w
        return bins_used

    def firstFit(self, weight, n, c):
        """Bin count of the *Best Fit* heuristic (each item goes into the
        tightest open bin that can still hold it).

        The method name is misleading but kept for backward compatibility
        with existing callers.

        Args:
            weight: list of item weights.
            n: number of items, len(weight).
            c: capacity of every bin.

        Returns:
            Total number of bins used.
        """
        bins_used = 0
        # Remaining free space per open bin; at most n bins can ever be needed.
        bin_rem = [0] * n
        for i in range(n):
            # Find the open bin that would be left with the least free space.
            # c + 1 acts as the "nothing fits" sentinel (this also stops the
            # original's shadowing of the built-in ``min``).
            best_left = c + 1
            best_bin = 0
            for j in range(bins_used):
                if bin_rem[j] >= weight[i] and bin_rem[j] - weight[i] < best_left:
                    best_bin = j
                    best_left = bin_rem[j] - weight[i]
            if best_left == c + 1:
                # No open bin can take this item: open a new bin for it.
                bin_rem[bins_used] = c - weight[i]
                bins_used += 1
            else:
                bin_rem[best_bin] -= weight[i]
        return bins_used
# Driver Code: run both heuristics on the third sample from the module docstring.
weight = [2, 5, 4, 7, 1, 3, 8]
c = 10
n = len(weight)
solution = Solution()
print("Number of bins required in Next Fit :", solution.nextfit(weight, c))
# Bug fix: this line previously labelled the firstFit() result "Next Fit".
print("Number of bins required in First Fit :", solution.firstFit(weight, n, c))
| [
"way2aru@yahoo.com"
] | way2aru@yahoo.com |
a1afd59f2383d1c1d8ee3a291ac661bd40087f98 | 01ac9e40052a468dd472a296df0003c4e629e2c9 | /news_all/spiders_old/shanxicm_all.py | 2c4298366b4cccc2e58e3004a296b2ce82e2d04f | [] | no_license | Pintrue/news_all | b5cee16584ed92e6574edd825b574214df65d917 | eb8c32c79bdacd8e2f76b88f27871c3cd0118006 | refs/heads/master | 2022-03-23T13:34:10.354029 | 2019-11-22T07:40:50 | 2019-11-22T07:40:50 | 223,058,997 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,666 | py | # -*- coding: utf-8 -*-
from datetime import datetime
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import Rule
from news_all.spider_models import NewsRCSpider
class Shanxichuanmei_allSpider(NewsRCSpider):
    """Crawler for sxdaily.com.cn — Shaanxi Media Network (陕西传媒网)."""
    name = 'sxcm'
    mystart_urls = {
        'http://www.sxdaily.com.cn/GB/26/508/index.html': 1301227,  # original news digest (原创-资讯聚焦)
        'http://www.sxdaily.com.cn/GB/216/83/526/index.html': 1301484,  # education news (教育-资讯)
    }
    rules = (
        # Article URLs look like:
        #   http://www.sxdaily.com.cn/n/2019/0624/c526-6500628.html
        # The pattern embeds the current year/month, so only this month's
        # articles are followed.
        Rule(LinkExtractor(allow=(r'sxdaily.com.cn/n/%s\d{2}/c\d{3}-\d+.html' % datetime.today().strftime('%Y/%m'), ),
                           ), callback='parse_item',
             follow=False),
    )

    def parse_item(self, response):
        """Parse the standard article layout; fall back to the alternate one."""
        xp = response.xpath
        try:
            title = xp("//div[@class='container title']/h1/text()").extract_first()
            content_div = xp("//div[@class='content']")[0]
            pubtime = xp("//div[@class='container title']/div/p[1]").re(r'\d{2,4}年\d{1,2}月\d{1,2}日')[0]
            origin_name = xp("//div[@class='container title']/div/p[2]/text()").extract_first('')
        except Exception:
            # Was a bare ``except:`` — narrowed so SystemExit/KeyboardInterrupt
            # are not swallowed. A mismatch means the page uses the second
            # known template.
            return self.parse_item_2(response)
        content, media, _, _ = self.content_clean(content_div)
        return self.produce_item(
            response=response,
            title=title,
            pubtime=pubtime,
            origin_name=origin_name,
            content=content,
            media=media
        )

    # Alternate layout, e.g. http://www.sxdaily.com.cn/n/2019/0621/c508-6500226.html
    def parse_item_2(self, response):
        """Parse the 'text width1000 clearfix' article layout."""
        xp = response.xpath
        try:
            title = xp("//div[@class='text width1000 clearfix']/h1/text()").extract_first()
            content_div = xp("//div[@class='text width1000 clearfix']")[0]
            pubtime = xp("//div[@class='text width1000 clearfix']/h3").re(r'\d{2,4}年\d{1,2}月\d{1,2}日')[0]
            origin_name = xp("//div[@class='text width1000 clearfix']/h3/text()").extract_first('')
        except Exception:  # bare ``except:`` narrowed, same reason as above
            return self.produce_debugitem(response, "xpath error")
        content, media, _, _ = self.content_clean(content_div)
        return self.produce_item(
            response=response,
            title=title,
            pubtime=pubtime,
            origin_name=origin_name,
            content=content,
            media=media
        )
| [
"py416@ic.ac.uk"
] | py416@ic.ac.uk |
984dc779ae03db65aa5cf196064ce9583373b4c7 | 45de3aa97525713e3a452c18dcabe61ac9cf0877 | /src/secondaires/navigation/commandes/vent/liste.py | 65ff36e8051daaf0b236c3e0f09e072847d05caf | [
"BSD-3-Clause"
] | permissive | stormi/tsunami | 95a6da188eadea3620c70f7028f32806ee2ec0d1 | bdc853229834b52b2ee8ed54a3161a1a3133d926 | refs/heads/master | 2020-12-26T04:27:13.578652 | 2015-11-17T21:32:38 | 2015-11-17T21:32:38 | 25,606,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,178 | py | # -*-coding:Utf-8 -*
# Copyright (c) 2010 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le paramètre 'liste' de la commande 'vent'."""
from primaires.interpreteur.masque.parametre import Parametre
class PrmListe(Parametre):
    """Parameter 'liste' of the 'vent' (wind) command.

    Lists every wind currently defined in the navigation module.
    (User-facing strings stay in French, as in the rest of the project.)
    """

    def __init__(self):
        """Build the parameter (French name 'liste', English alias 'list')."""
        Parametre.__init__(self, "liste", "list")
        self.schema = ""
        self.aide_courte = "liste les vents existants"
        self.aide_longue = \
            "Cette commande liste les vents existants."

    def interpreter(self, personnage, dic_masques):
        """Interpret the parameter: show the player a table of all winds."""
        vents = list(type(self).importeur.navigation.vents.values())
        vents = sorted(vents, key=lambda n: n.cle)
        if vents:
            lignes = [
                " Clé             | Étendue         | " \
                "Position        | Vitesse    | Direction"]
            for vent in vents:
                vitesse = vent.vitesse.norme
                vitesse = round(vitesse, 3)
                vitesse = str(vitesse).replace(".", ",")
                # Bug-prone ``a and b or c`` replaced by a conditional
                # expression; the old form also yielded "aucune" when the
                # etendue existed but its cle was falsy.
                etendue = vent.etendue.cle if vent.etendue else "aucune"
                direction = vent.vitesse.direction
                direction = round(direction, 3)
                direction = str(direction).replace(".", ",")
                lignes.append(
                    " {:<15} | {:<15} | {:>15} | {:>10} | {:>9}".format(
                        vent.cle, etendue, vent.coordonnees, vitesse,
                        direction))
            personnage << "\n".join(lignes)
        else:
            personnage << "Aucun vent n'est actuellement défini."
| [
"kredh@free.fr"
] | kredh@free.fr |
3333a07dc20203cbf1503febc29135cf866471da | 0ddcfcbfc3faa81c79e320c34c35a972dab86498 | /puzzles/search_suggestions_system.py | 35af16a9a95358d24b63e69cb907978adf85d9a1 | [] | no_license | IvanWoo/coding-interview-questions | 3311da45895ac4f3c394b22530079c79a9215a1c | 1312305b199b65a11804a000432ebe28d1fba87e | refs/heads/master | 2023-08-09T19:46:28.278111 | 2023-06-21T01:47:07 | 2023-06-21T01:47:07 | 135,307,912 | 0 | 0 | null | 2023-07-20T12:14:38 | 2018-05-29T14:24:43 | Python | UTF-8 | Python | false | false | 3,358 | py | # https://leetcode.com/problems/search-suggestions-system/
"""
Given an array of strings products and a string searchWord. We want to design a system that suggests at most three product names from products after each character of searchWord is typed. Suggested products should have common prefix with the searchWord. If there are more than three products with a common prefix return the three lexicographically minimums products.
Return list of lists of the suggested products after each character of searchWord is typed.
Example 1:
Input: products = ["mobile","mouse","moneypot","monitor","mousepad"], searchWord = "mouse"
Output: [
["mobile","moneypot","monitor"],
["mobile","moneypot","monitor"],
["mouse","mousepad"],
["mouse","mousepad"],
["mouse","mousepad"]
]
Explanation: products sorted lexicographically = ["mobile","moneypot","monitor","mouse","mousepad"]
After typing m and mo all products match and we show user ["mobile","moneypot","monitor"]
After typing mou, mous and mouse the system suggests ["mouse","mousepad"]
Example 2:
Input: products = ["havana"], searchWord = "havana"
Output: [["havana"],["havana"],["havana"],["havana"],["havana"],["havana"]]
Example 3:
Input: products = ["bags","baggage","banner","box","cloths"], searchWord = "bags"
Output: [["baggage","bags","banner"],["baggage","bags","banner"],["baggage","bags"],["bags"]]
Example 4:
Input: products = ["havana"], searchWord = "tatiana"
Output: [[],[],[],[],[],[],[]]
Constraints:
1 <= products.length <= 1000
There are no repeated elements in products.
1 <= Σ products[i].length <= 2 * 10^4
All characters of products[i] are lower-case English letters.
1 <= searchWord.length <= 1000
All characters of searchWord are lower-case English letters.
"""
from __future__ import annotations
from bisect import bisect_left
from dataclasses import dataclass, field
@dataclass
class TrieNode:
    """One node of the product-suggestion trie (see ProductDictionary)."""
    # Next character -> child node; created lazily during insert().
    children: dict[str, TrieNode] = field(default_factory=dict)
    # Every product whose prefix path passes through this node, in insertion
    # order (the caller inserts in sorted order, so this stays sorted).
    value: list[str] = field(default_factory=list)
class ProductDictionary:
    """Prefix tree over product names.

    Each trie node keeps, in insertion order, every product whose name
    passes through it; ``search`` returns the first three such products
    for a given prefix (insert in lexicographic order to get the three
    lexicographically smallest).
    """

    def __init__(self) -> None:
        self.root = TrieNode()

    def insert(self, product: str) -> None:
        """Record *product* on every node along its character path."""
        node = self.root
        for ch in product:
            node = node.children.setdefault(ch, TrieNode())
            node.value.append(product)
        return

    def search(self, word: str) -> list[str]:
        """Return up to three stored products having *word* as a prefix."""
        node = self.root
        for ch in word:
            child = node.children.get(ch)
            if child is None:
                return []
            node = child
        return node.value[:3]
def suggested_products(products: list[str], searchWord: str) -> list[list[str]]:
    """Trie-based variant: top-3 lexicographic suggestions per typed prefix.

    Returns one list of (at most three) suggestions for each prefix of
    ``searchWord``.

    NOTE(review): this definition is immediately shadowed by the
    bisect-based redefinition below; kept for reference.
    """
    # Sort a copy so the caller's list is no longer mutated as a side effect.
    products = sorted(products)
    pd = ProductDictionary()
    for product in products:
        pd.insert(product)
    ans = []
    for end in range(1, len(searchWord) + 1):
        ans.append(pd.search(searchWord[:end]))
    return ans
def suggested_products(products: list[str], searchWord: str) -> list[list[str]]:
    """Top-3 lexicographic suggestions for each prefix of ``searchWord``.

    Binary-search variant: for every typed prefix, locate the insertion
    point in the sorted product list and keep the first three entries
    that actually start with the prefix. The search resumes from the
    previous insertion point, since a longer prefix can only move right.
    """
    # Sort a copy so the caller's list is no longer mutated as a side effect.
    products = sorted(products)
    ans, prefix, i = [], "", 0
    for char in searchWord:
        prefix += char
        i = bisect_left(products, prefix, i)
        ans.append([w for w in products[i : i + 3] if w.startswith(prefix)])
    return ans
if __name__ == "__main__":
    # Smoke-run of the (bisect-based) implementation; result is discarded.
    suggested_products(["mobile", "mouse", "moneypot", "monitor", "mousepad"], "mouse")
| [
"tyivanwu@gmail.com"
] | tyivanwu@gmail.com |
0e4fe342e18762fbb8a1bc4750b26745d730e480 | 71877e3f343e3899da77878937362191fdc02a0f | /swiflow_analysis/swi_aggregation_comparison/compare_swi.py | d563cb0874c877b7f5736ada0aceb571a557e152 | [] | no_license | micahjohnson150/scripts | 2a9007ae6d2ad3eec3596aff6e016f6d13fb0652 | 32a8322cab7463dbcc4d6042e7d53a03c2ee2654 | refs/heads/master | 2023-05-26T04:48:27.005338 | 2023-01-21T14:08:51 | 2023-01-21T14:08:51 | 144,737,605 | 0 | 0 | null | 2023-05-22T21:36:55 | 2018-08-14T15:17:16 | Python | UTF-8 | Python | false | false | 1,342 | py | import pandas as pd
import matplotlib.pyplot as plt
# Input files: the same SWI aggregation produced by two different pipelines.
f_ernesto = "./Tollgate_SWI_timeseries_from_ernesto.csv"
f_swiflow = "swi_from_swiflow.csv"
f_swiflow_basin = "./basin_catchments.csv"  # NOTE(review): currently unused — kept for reference

# Ernesto's file: line 0 = catchment ids, line 1 = catchment areas, data from line 2.
df_ernesto = pd.read_csv(f_ernesto, names=["datetime", "1", "3", "5", "7", "9"],
                         header=2, parse_dates=[0])
df_ernesto.set_index("datetime", inplace=True)

df = pd.read_csv(f_swiflow, header=0, parse_dates=[0])
df.set_index("datetime", inplace=True)

# Parse the per-catchment areas from Ernesto's header lines and normalize his
# values by area so both datasets share the same units.
# (Redundant fp.close() inside the with-block removed: the context manager
# already closes the file.)
with open(f_ernesto, 'r') as fp:
    lines = fp.readlines()

area_line = lines[1]
ids = [s.strip() for s in lines[0].split(',')]
for i, s in enumerate(area_line.split(",")):
    if i != 0:  # column 0 is the datetime column — no area to divide by
        df_ernesto[ids[i]] = df_ernesto[ids[i]] / float(s)

# Just compare the totals.
df_ernesto["total"] = df_ernesto.sum(axis=1)
df["total"] = df.sum(axis=1)
diff = df['total'] - df_ernesto['total']  # handy for ad-hoc stats in a REPL

plt.plot(df.index, df['total'], label="Produced in SWIFlow")
# Typo fix: the legend previously read "Eneresto".
plt.plot(df_ernesto.index, df_ernesto['total'], label="Produced from Ernesto")
plt.legend()
plt.title("Prep_swi tool VS with E.Tujillo swi aggregation script")
plt.savefig("swi_comparison.png")
plt.show()
| [
"micah.johnson150@gmail.com"
] | micah.johnson150@gmail.com |
c7310bce92fed3500b1a50f978309ae31303d0fb | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/network/v20170601/get_virtual_network_gateway_learned_routes.py | 5c875d8b272d5c9c9b638d37f083bdd9454a09d7 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 2,482 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetVirtualNetworkGatewayLearnedRoutesResult',
'AwaitableGetVirtualNetworkGatewayLearnedRoutesResult',
'get_virtual_network_gateway_learned_routes',
]
@pulumi.output_type
class GetVirtualNetworkGatewayLearnedRoutesResult:
    """
    List of virtual network gateway routes
    """
    # Auto-generated by the Pulumi SDK generator — regenerate rather than edit.
    def __init__(__self__, value=None):
        # Generated constructors validate then stash fields via pulumi.set.
        if value and not isinstance(value, list):
            raise TypeError("Expected argument 'value' to be a list")
        pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def value(self) -> Optional[Sequence['outputs.GatewayRouteResponse']]:
        """
        List of gateway routes
        """
        return pulumi.get(self, "value")
class AwaitableGetVirtualNetworkGatewayLearnedRoutesResult(GetVirtualNetworkGatewayLearnedRoutesResult):
    # Generated wrapper that lets callers ``await`` the (already resolved)
    # result: __await__ yields nothing and returns a plain result object.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetVirtualNetworkGatewayLearnedRoutesResult(
            value=self.value)
def get_virtual_network_gateway_learned_routes(resource_group_name: Optional[str] = None,
                                               virtual_network_gateway_name: Optional[str] = None,
                                               opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVirtualNetworkGatewayLearnedRoutesResult:
    """
    List of virtual network gateway routes
    :param str resource_group_name: The name of the resource group.
    :param str virtual_network_gateway_name: The name of the virtual network gateway.
    """
    # Build the argument payload for the Azure Native provider invoke.
    __args__ = dict()
    __args__['resourceGroupName'] = resource_group_name
    __args__['virtualNetworkGatewayName'] = virtual_network_gateway_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Pin the provider version for this invoke to the SDK's own version.
        opts.version = _utilities.get_version()
    # Synchronous invoke; '.value' extracts the payload from the InvokeResult.
    __ret__ = pulumi.runtime.invoke('azure-native:network/v20170601:getVirtualNetworkGatewayLearnedRoutes', __args__, opts=opts, typ=GetVirtualNetworkGatewayLearnedRoutesResult).value
    return AwaitableGetVirtualNetworkGatewayLearnedRoutesResult(
        value=__ret__.value)
| [
"noreply@github.com"
] | morrell.noreply@github.com |
9f593f2817b494eb73c69db565b4aebe6106d997 | b62f8cc3dc7383bc1c58ac0f20ba187a685fd98d | /MyPortfolioDjangoProject/wsgi.py | 9024f94897f6cbba4237bd7215e1134c45edaf55 | [] | no_license | dilipksahu/dilipksahu.github.io | 9c153fd0777a59157fc77c1e2cd199b32b8e8757 | 489e177fd6680f40b24890ef586f717425e2aa10 | refs/heads/master | 2022-12-24T03:33:50.490601 | 2020-10-01T16:32:54 | 2020-10-01T16:32:54 | 300,344,305 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | """
WSGI config for MyPortfolioDjangoProject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Must be set before get_wsgi_application() imports the settings module;
# setdefault() keeps any value already provided by the environment.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'MyPortfolioDjangoProject.settings')
# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) look up.
application = get_wsgi_application()
| [
"sahud048@gmail.com"
] | sahud048@gmail.com |
6d0052fbb9e7fb5414767163bdb2f2fa7e500bd1 | 5215ee22217a0bddc6a6eae3b0e49c1216a77bbc | /snippets/cap3ParserGenerators.py | 5f903d0079eca40fbe66d625456550323df04e21 | [
"Artistic-2.0"
] | permissive | PapenfussLab/Mungo | 5cda4d19a5ef4cb51da495f7abf259b4cd4d1697 | 02c5b0e48ecd28596cb9481b282753859f47fed6 | refs/heads/master | 2021-01-17T07:40:08.832760 | 2015-08-20T01:21:19 | 2015-08-20T01:21:19 | 41,067,597 | 1 | 3 | null | 2016-06-12T23:31:20 | 2015-08-20T01:02:28 | Python | UTF-8 | Python | false | false | 2,986 | py | # CAP3 standalone parsers
def parserGenerator(iFile):
    """Yield ``(header, contents)`` for each '*'-introduced section of a
    CAP3 report.

    ``header`` is the '*' line with asterisks and whitespace squeezed out;
    ``contents`` collects the section's non-blank lines (the section-1
    terminator line is skipped).

    NOTE(review): a trailing ``('', [])`` pair is yielded when the input
    contains no '*' header at all — confirm callers expect this.
    ``State`` and ``Tokens`` are module-level helpers defined elsewhere in
    this file.
    """
    header, contents = '', []
    state = State.begin
    for raw in iFile:
        line = raw.rstrip()
        if not line:
            continue  # blank lines are ignored in every state
        if state == State.begin:
            # Skip leading junk until the first '*' header appears.
            if line.startswith('*'):
                state = State.section1or2
                header = ''.join(line.replace('*', '').strip().split())
        elif state == State.section1or2:
            if line.startswith('*'):
                # A new section begins: emit the one just finished.
                yield header, contents
                header = ''.join(line.replace('*', '').strip().split())
                contents = []
            elif line != Tokens.endSection1:
                contents.append(line)
    yield header, contents
def fullParserGenerator(iFile):
    """Yield per-contig results from both sections of a CAP3 report.

    For every contig in section 1 this yields
    ``(State.section1, (contigName, readNames, contained))``, then for every
    contig in section 2 ``(State.section2, (contigName, alignmentText))``.

    NOTE(review): the section-1 terminator is compared against a
    module-level ``endSection1Token`` here, while ``parserGenerator`` uses
    ``Tokens.endSection1`` — confirm both names exist and agree.
    """
    contigName = ''
    readNames = []
    contained = {}  # read name -> last token of its line (containment info)
    aln = []        # accumulated alignment lines for the current contig
    state = State.begin
    for line in iFile:
        line = line.rstrip()
        if not line:
            # Blank line: ignored in every state
            pass
        elif state==State.begin:
            if line[0]!='*':
                # Junk at top of the report: skip until the first header
                pass
            elif line[0]=='*':
                # Section 1 - First contig header
                state = State.section1
                contigName = ''.join(line.replace('*', '').split())
        elif state==State.section1:
            if line[0]=='*':
                # Section 1 - New contig: emit the finished one first
                yield State.section1,(contigName,readNames,contained)
                contigName = ''.join(line.replace('*', '').split())
                readNames = []
                contained = {}
            elif line[0]==' ':
                # Section 1 - Indented line: read name plus containment
                # marker (the last token of the line)
                tokens = line.strip().split()
                readNames.append(tokens[0])
                contained[tokens[0]] = tokens[-1]
            elif line==endSection1Token:
                # End of section 1: flush the last contig, switch sections
                yield State.section1,(contigName,readNames,contained)
                state = State.beginSection2
            elif line[0]!=' ':
                # Section 1 - Plain (unindented) read name
                readNames.append(line)
        elif state==State.beginSection2 and line[0]=='*':
            # Section 2 - First contig header
            state = State.section2
            contigName = ''.join(line.replace('*', '').split())
        elif state==State.section2:
            if line[0]=='*':
                # Section 2 - New contig: emit the finished alignment first
                yield State.section2,(contigName,'\n'.join(aln))
                contigName = ''.join(line.replace('*', '').split())
                aln = []
            elif line[0]!='*':
                # Section 2 - Line belonging to the current alignment
                aln.append(line)
            else:
                # Defensive: unreachable, the two branches above are exhaustive
                raise Exception('cap3._fullParserGenerator')
    yield State.section2,(contigName,'\n'.join(aln))
| [
"papenfuss@mac2576.wehi.edu.au"
] | papenfuss@mac2576.wehi.edu.au |
29c9c1c45cb0f0e35774a92ac9cbae8597de59d0 | d94b6845aeeb412aac6850b70e22628bc84d1d6d | /d3pm/text/tasks.py | 4916ac410505d43ba5fb1193b727bda89bf2c71d | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | ishine/google-research | 541aea114a68ced68736340e037fc0f8257d1ea2 | c1ae273841592fce4c993bf35cdd0a6424e73da4 | refs/heads/master | 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 | Apache-2.0 | 2020-06-23T01:55:11 | 2020-02-23T07:59:42 | Jupyter Notebook | UTF-8 | Python | false | false | 1,918 | py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Single import to access / gin register tasks."""
from d3pm.text import diffusion # pylint: disable=unused-import
from d3pm.text import types
class TaskRegistry:
  """A registry containing all tasks supported by D3PM."""

  def __init__(self):
    self.tasks = {}

  def clear(self):
    """Remove every registered task."""
    self.tasks = {}

  def register(self, name, task):
    """Register *task* under *name*, overwriting any existing entry."""
    self.tasks[name] = task

  def list_tasks(self):
    """Returns a human-readable list of the available tasks."""
    msg = "Available Tasks:\n\n"
    for name in self.tasks:
      msg += "* " + name + "\n"
    return msg

  def load(self, name):
    """Load a task registered with the TaskRegistry.

    Raises:
      ValueError: if no task named *name* was registered.
    """
    if name not in self.tasks:
      info_string = self.list_tasks()
      # Typo fix: the message previously read "a tasks".
      raise ValueError(
          f"Unable to find a task with the name {name}.\n\n{info_string}.")
    return self.tasks[name]
_REGISTRY = TaskRegistry()  # module-level singleton behind load()/register()
def load(name):
  """Load a task registered with the D3PM task registry.

  Args:
    name: the name of the task to load.

  Returns:
    a D3PM task.

  Raises:
    ValueError: if *name* is not registered (from TaskRegistry.load).
  """
  return _REGISTRY.load(name)
def register(name, task):
  """Register a task with the module-level registry.

  Args:
    name: the name of the task to register.
    task: a task to register.

  Returns:
    the registered task itself (unchanged), so the call can be chained.
  """
  _REGISTRY.register(name, task)
  return task
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
f28146e0dd215371221afd39ab297aeb7fdfc9e8 | c21535bd337e7059f0bf5cf2ef9b012692b855c7 | /blog/views.py | 65c4c119c8eb0d57eec50edaeba98e24a12228de | [] | no_license | samarmstrong88/my-first-blog | b171f10bd9cce5e5c35d6e43b22c80f8785c9eb2 | 92595810db2daa73da7045f4ca9ddae41a200cfd | refs/heads/master | 2021-01-19T17:00:32.565007 | 2017-08-28T06:01:24 | 2017-08-28T06:01:24 | 101,034,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 655 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils import timezone
from django.shortcuts import render
from .models import Post
def post_list(request):
    """Render every post published up to now, oldest first."""
    published_posts = (
        Post.objects
        .filter(published_date__lte=timezone.now())
        .order_by('published_date')
    )
    return render(request, 'blog/post_list.html', {'posts': published_posts})
def detail_list(request, post_id):
    """Render one post plus its five earliest comments.

    Responds with 404 for an unknown id instead of raising
    Post.DoesNotExist (which previously surfaced as a 500 error).
    """
    # Function-scope import keeps this fix self-contained;
    # django.shortcuts is already a dependency of this module (render).
    from django.shortcuts import get_object_or_404
    post = get_object_or_404(Post, pk=post_id)
    # NOTE(review): ascending created_date means these are the five OLDEST
    # comments despite the variable name — use '-created_date' if the
    # newest ones are intended.
    latest_comment_list = post.comment_set.order_by('created_date')[:5]
    return render(request, 'blog/detail_list.html', {
        'post': post,
        'lcl': latest_comment_list})
| [
"="
] | = |
e3fd712c70db44494bb5f1c5c844a77579c891b9 | 78ae755947ad6fb3a7a37dd680e59bf5af9cb721 | /suru/suru/users/models.py | 590f643ec0152459dffe8db881dfa68581f75e00 | [] | no_license | fali007/django | ec01f1788781e3be8a7ddda0f7b1947ed326194d | c75396c94ebebf674eb499ea45ea19d45d9b6a6c | refs/heads/master | 2020-12-04T05:30:02.338895 | 2020-01-03T17:23:10 | 2020-01-03T17:23:10 | 231,632,993 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 597 | py | from django.db import models
from django.contrib.auth.models import User
from PIL import Image
# Create your models here.
class Profile(models.Model):
user=models.OneToOneField(User,on_delete=models.CASCADE)
image=models.ImageField(default='default.jpg',upload_to='profile_pics')
def __str__(self):
return f'{self.user.username} Profile'
def save(self):
super().save()
img=Image.open(self.image.path)
if img.height>300 or img.width>300:
output_size=(300,300)
img.thumbnail(output_size)
img.save(self.image.path) | [
"noreply@github.com"
] | fali007.noreply@github.com |
3e9a2ce01e9f699ae38015ad6ffc8efebadefec1 | a5e5d39f42f468d35f18aab3e78c3c090046b0df | /apps/roulette/tests/test_logging.py | 59ef62266f77f2e4b62ed24ee47ddd4a343c978a | [] | no_license | WayneLambert/portfolio | 66198dfc18b3f254e6bc726575903c3e8f570dc4 | 7e02165386e4784f81e15bae0325a77cf45f410d | refs/heads/main | 2023-02-04T18:08:13.559223 | 2023-01-29T14:13:59 | 2023-01-29T14:13:59 | 180,239,669 | 5 | 1 | null | 2023-02-04T07:07:10 | 2019-04-08T22:02:22 | JavaScript | UTF-8 | Python | false | false | 204 | py | import os
from aa_project.settings import base
from apps.roulette.logging import log_file
def test_log_file_setup():
    # The roulette logger must be configured to write holiday_roulette.log
    # under the app directory. The expected fragment hard-codes '/' as the
    # separator, so the check is POSIX-specific — TODO confirm that is OK.
    assert os.path.join(base.APPS_DIR, 'roulette/holiday_roulette.log') in log_file
| [
"wayne.a.lambert@gmail.com"
] | wayne.a.lambert@gmail.com |
22cd9eded0af52639910f322140b746baf239595 | e4bc9d7f2e177bcd9850ffa12e5b2ddabb3f98ab | /02. Data Science/2. Analysis/2. Excel/2excel_parsing_and_write.py | 3a90d282a077644dc637de6c0f289e7a98428ea8 | [] | no_license | yameenjavaid/bigdata2019 | 94b80b570b76cb174fcacddf8c8c1711a72e77b4 | 3565a1f10162d0ad147a9f189536b858ebe04ffa | refs/heads/master | 2022-01-13T15:39:09.793982 | 2019-04-29T00:14:07 | 2019-04-29T00:14:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 614 | py | # xlwt 모듈 설치
# Purpose: process a single worksheet (원문: 단일 워크시트 처리)
import sys
from xlrd import open_workbook
from xlwt import Workbook
# Copy worksheet 'january_2013' from the input workbook into a fresh output
# workbook, cell by cell. Usage: script.py <input.xls> <output.xls>
input_file = sys.argv[1]
output_file = sys.argv[2]
output_workbook = Workbook()
output_worksheet = output_workbook.add_sheet('jan_2013_output')
with open_workbook(input_file) as workbook :
    # Source sheet is looked up by name; xlrd raises if it is missing.
    worksheet = workbook.sheet_by_name('january_2013')
    for row_index in range(worksheet.nrows) :
        for column_index in range(worksheet.ncols) :
            output_worksheet.write(row_index, column_index, worksheet.cell_value(row_index, column_index))
output_workbook.save(output_file) | [
"bgd0706@hanmail.net"
] | bgd0706@hanmail.net |
8cd6a57e9cce192f75ee616e48397155d4a6adf7 | 6bac331844a2f25e56f37b29de7fb79c497d6ad4 | /store/migrations/0001_initial.py | da15004b7df3a7e92c7b5c83c3ec5c7684142ac6 | [] | no_license | ratulkhan44/django_ecommerce | 766de76b7aae4519df24249ea49e54d05d39db52 | b3342ec5aa8af152e5998a6e8e9184b8da69296e | refs/heads/main | 2023-05-24T18:26:25.279079 | 2021-05-30T11:56:37 | 2021-05-30T11:56:37 | 370,646,637 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,262 | py | # Generated by Django 3.2.3 on 2021-05-24 08:52
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by ``makemigrations`` (Django 3.2.3). Do not edit the
    # operations below by hand — create a follow-up migration instead.
    initial = True
    dependencies = [
        # Product.category is a FK into the category app, so its tables
        # must exist first.
        ('category', '0002_alter_category_slug'),
    ]
    operations = [
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('product_name', models.CharField(max_length=200, unique=True)),
                ('slug', models.SlugField(max_length=200, unique=True)),
                ('description', models.TextField(max_length=500, unique=True)),
                ('price', models.FloatField()),
                ('product_image', models.ImageField(upload_to='images/products')),
                ('stock', models.IntegerField()),
                ('is_available', models.BooleanField(default=True)),
                ('created_date', models.DateTimeField(auto_now_add=True)),
                ('modified_date', models.DateTimeField(auto_now=True)),
                ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='category.category')),
            ],
        ),
    ]
| [
"ratulcse.khan@gmail.com"
] | ratulcse.khan@gmail.com |
84bf16a2585d4f39efc961d844fc7b71f2809961 | 010e2215e0db148880ece9d9f16cb2d145f0cdf9 | /metrics/task_compute_metrics.py | c3461611ef46b11219e325ba3e4283d2c0b9c826 | [] | no_license | kangzhonghua/electra_pytorch | cf83898796cf9bfc1d44c3273803a85fd1d6fd77 | e453a4ba17e24cd57af7b77f8d81a12151da932a | refs/heads/master | 2021-04-15T21:43:46.414797 | 2020-03-21T15:01:13 | 2020-03-21T15:01:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,399 | py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import sys
import logging
logger = logging.getLogger(__name__)
try:
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import matthews_corrcoef, f1_score
_has_sklearn = True
except (AttributeError, ImportError) as e:
logger.warning("To use data.metrics please install scikit-learn. See https://scikit-learn.org/stable/index.html")
_has_sklearn = False
def simple_accuracy(preds, labels):
    """Fraction of positions where prediction equals label.

    Both arguments must support element-wise ``==`` yielding an object
    with a ``.mean()`` (e.g. numpy arrays of the same shape).
    """
    matches = preds == labels
    return matches.mean()
def acc_and_f1(preds, labels):
    """Accuracy, binary F1, and their mean.

    Requires scikit-learn (``f1_score``); see the guarded import at the
    top of this module.
    """
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }
def pearson_and_spearman(preds, labels):
    """Pearson r, Spearman rho, and their average (key ``"corr"``)."""
    r = pearsonr(preds, labels)[0]
    rho = spearmanr(preds, labels)[0]
    average = (r + rho) / 2
    return {"pearson": r, "spearmanr": rho, "corr": average}
def compute_metrics(task_name, preds, labels):
    """Compute the evaluation metric(s) for a GLUE/CLUE-style task.

    Inputs:
    - task_name: lower-case task identifier (e.g. "cola", "mrpc", "tnews")
    - preds, labels: equal-length sequences of predictions and gold labels

    Returns a dict mapping metric name to value.
    Raises KeyError for an unknown task name.
    """
    assert len(preds) == len(labels)
    # Most tasks are scored by plain accuracy; list them once instead of
    # repeating an identical elif branch per task (the original had 14).
    accuracy_tasks = {
        "sst-2", "mnli", "mnli-mm", "qnli", "rte", "wnli", "lcqmc", "cls",
        "cmnli", "iflytek", "wsc", "tnews", "afqmc", "copa",
    }
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name in ("mrpc", "qqp"):
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name in accuracy_tasks:
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
| [
"1436496575@qq.com"
] | 1436496575@qq.com |
0e5a93b4e1a221c861ab60ce38306eb99f449544 | 12fcd6e05a1dd359cbba94e76278dc53daa319f5 | /src/billing/migrations/0002_auto_20191227_1342.py | 669fe9149c7a279c05e698c9b171bfaa0133e6ac | [] | no_license | Raju-Pinnam/stacks | 1b105c8e61b793f8ca06cf45371f67e586f7611c | a546a74b059140298b0d9d53f6e2ddb94c6c7028 | refs/heads/master | 2020-11-27T13:35:37.155907 | 2019-12-29T09:25:56 | 2019-12-29T09:25:56 | 229,464,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 600 | py | # Generated by Django 3.0.1 on 2019-12-27 08:12
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (billing/0002): relaxes the
    BillingProfile.user one-to-one link so it may be null/blank."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('billing', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='billingprofile',
            name='user',
            # blank=True/null=True make the profile's user link optional.
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"pinnampadmaraju@gmail.com"
] | pinnampadmaraju@gmail.com |
bf0b06383f948812e270deb17bef1b52028d6343 | 410f0d66049ca881dfdeb9b7f784bd70b1c3f6e7 | /citiesocial/citiesocial/spiders/getproductsfromcollecturl.py | 60451efcd336354b3fb6615f86f6a3e968889314 | [] | no_license | ans2human/Scrappers | 1bdf5a1a4a34752c58fb18d45ac01e3cb54b5fe1 | cb2183d25a8af08284f2a6c1311b8da24c720b96 | refs/heads/master | 2020-03-19T20:41:01.244911 | 2018-06-11T11:27:17 | 2018-06-11T11:27:17 | 136,911,832 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 689 | py | #-*- coding: utf-8 -*-
import scrapy
from citiesocial.items import CitiesocialItem
class GetproductsfromcollecturlSpider(scrapy.Spider):
    """Scrapy spider that visits every collection URL listed in
    ``collecturls.csv`` and yields the product links found on each page."""

    name = 'getproductsfromcollecturl'
    allowed_domains = []
    start_urls = []
    # NOTE: the following runs at class-definition (import) time.  Each line
    # of collecturls.csv becomes a start URL; its first four characters are
    # stripped to form the allowed domain (presumably dropping a scheme/"www."
    # prefix -- TODO confirm against the actual CSV contents).
    read_urls = open('collecturls.csv', 'r')
    for url in read_urls.readlines():
        url = url.strip()
        allowed_domains = allowed_domains + [url[4:]]
        start_urls = start_urls + [url]
    read_urls.close()

    def parse(self, response):
        """Extract product URLs from one collection page."""
        items = CitiesocialItem()
        for href in response.xpath('//h3[@class="product-item__vendor"]/a/@href'):
            # Resolve relative hrefs against the current page URL.
            items['produrl'] = response.urljoin(href.extract())
            yield items
| [
"ans2human@gmail.com"
] | ans2human@gmail.com |
72d4c2579b98cbc7e5f7dda8281be95a5f798db2 | ba995756ff6c856abe98c387bd85ea8cfca66a74 | /medium/python338.py | 1af777d6b16f80e504f592755f95fc15b487fea9 | [] | no_license | HarshilModi10/MCP_Competition | ae7be388c947ce0a80a84dfe4cda426060d993c5 | 646e0fe39a51a1d48a8a987435307f7cfca7938a | refs/heads/master | 2020-04-24T17:27:31.522701 | 2019-08-04T15:40:12 | 2019-08-04T15:40:12 | 172,147,919 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,202 | py |
# Brute force: an independent popcount for each value.
# Time O(num * bits-per-int), output space O(num).
class Solution(object):
    def countBits(self, num):
        """
        :type num: int
        :rtype: List[int]
        """
        # One popcount per value from 0 through num, collected in order.
        return [self.count_number(value) for value in range(num + 1)]

    def count_number(self, num):
        """Popcount via Kernighan's trick: ``n & (n - 1)`` clears the
        lowest set bit, so the loop runs once per 1-bit."""
        ones = 0
        while num:
            num &= num - 1
            ones += 1
        return ones
#Faster solution using dynamic programming space O(N) time O(N)
class Solution(object):
    def countBits(self, num):
        """
        :type num: int
        :rtype: List[int]
        """
        """
        how does number of 1s work
        0 -> 0
        1 -> 1
        2 -> 10:1
        3 -> 11:2
        4 -> 100:1
        5 -> 101:2
        6: -> 110: 2
        7: -> 111: 3
        100100 -> 100011
        """
        if num == 0:
            return [0]
        # bit is the largest power of two <= i (the value of i's leading
        # bit); n_bit is the next power of two, used to detect when the
        # window must advance.
        bit = 1
        n_bit = bit << 1
        output = [0]
        for i in range(1, num + 1):
            if n_bit <= i:
                # i has reached the next power of two: slide the window up.
                bit = n_bit
                n_bit *= 2
            # Recurrence: popcount(i) = 1 (for the leading bit)
            #                         + popcount(i mod bit), already in output.
            count = 1
            rem = i % bit
            count += output[rem]
            output.append(count)
        return output
# DP variant: each count is derived from an earlier table entry.
class Solution(object):
    def countBits(self, num):
        """
        :type num: int
        :rtype: List[int]
        """
        # Tiny inputs are answered directly.
        if num == 0:
            return [0]
        if num == 1:
            return [0, 1]
        if num == 2:
            return [0, 1, 1]
        counts = [0] * (num + 1)
        counts[1] = 1
        counts[2] = 1
        exponent = 1
        for value in range(3, num + 1):
            # Advance the window once value passes the next power of two.
            # (At value == 2**k itself the modulo below is 0, so counts[0]
            # still yields the correct answer of 1.)
            if value > 2 ** (exponent + 1):
                exponent += 1
            # Dropping the leading power-of-two block leaves an already
            # solved smaller remainder.
            counts[value] = 1 + counts[value % 2 ** exponent]
        return counts[:num + 1]
| [
"modih1@mcmaster.ca"
] | modih1@mcmaster.ca |
af520f733c8fac34768c9d167ec2dd88edd60c4c | 81d0bfe1262008587ddf5ac12ae034d6922b9747 | /.history/test_20201016160500.py | 365196b6e30abb4cace2a64e9920b79fa6776ce4 | [] | no_license | elvinyeka/Smart-Mobile | 525fffac14b8c460e85002bbf154bf54b4a341fe | a32f557306ae1bfe3ae01f5a8beef93727cfbc47 | refs/heads/master | 2023-06-09T09:52:18.446572 | 2021-07-06T11:35:34 | 2021-07-06T11:35:34 | 313,988,596 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 560 | py |
import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
# Resolve the SQLite file relative to this module so the app works
# regardless of the current working directory.
basedir = os.path.abspath(os.path.dirname(__file__))

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'data.sqlite')
db = SQLAlchemy(app)
class Human(db.Model):
    """SQLAlchemy model: a person with a name and an age."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String)
    age = db.Column(db.Integer)

    def __init__(self,name,age):
        self.name = name
        self.age = age

    def __repr__(self):
        # Human-readable form used when printing query results.
        return f'Name: {self.name} Age: {self.age}'
"elvinyeka@gmail.com"
] | elvinyeka@gmail.com |
88b03c669a451d6bced94894be642fb8c26da16d | facb8b9155a569b09ba66aefc22564a5bf9cd319 | /wp2/kikoAnalysis/era5_surge_reconstruction_mlr_ver2.py | 01dc8b1536a09e9ab7343c428696946b366e6dbe | [] | no_license | moinabyssinia/modeling-global-storm-surges | 13e69faa8f45a1244a964c5de4e2a5a6c95b2128 | 6e385b2a5f0867df8ceabd155e17ba876779c1bd | refs/heads/master | 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,908 | py | # -*- coding: utf-8 -*-
"""
Created on Mon May 4 15:51:30 2020
----------------------------------------------------
This program is designed to reconstruct ERA-Five daily
maximum surge using MLR
----------------------------------------------------
@author: Michael Tadesse
"""
#import packages
import os
# import numpy as np
import pandas as pd
# from sklearn import metrics
# from scipy import stats
import statsmodels.api as sm
# from datetime import datetime
# from sklearn.linear_model import LinearRegression
from sklearn.decomposition import PCA
# from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
#defining directories
dir_in = 'G:\\05_era5\\kikoStuff\\combinedPred'
dir_out = 'G:\\05_era5\\kikoStuff\\test'
surge_path = 'G:\\05_era5\\kikoStuff\\05_dmax_surge_georef'
def reconstruct():
    """Reconstruct daily-maximum surge at each tide gauge via MLR.

    For every gauge file in ``dir_in``: standardize the lagged predictors
    (augmented with squared and cubed wind terms), reduce them with PCA,
    fit an OLS model on the period overlapping the observed surge, then
    predict the full predictor record with a 95% prediction interval and
    write one CSV per gauge to ``dir_out``.
    """
    #cd to the lagged predictors directory
    os.chdir(dir_in)
    #looping through
    for tg in range(len(os.listdir())):
        os.chdir(dir_in)
        tg_name = os.listdir()[tg]
        print(tg, tg_name)

        #load predictor
        pred = pd.read_csv(tg_name)
        pred.drop('Unnamed: 0', axis = 1, inplace = True)

        #add squared and cubed wind terms (as in WPI model)
        pickTerms = lambda x: x.startswith('wnd')
        wndTerms = pred.columns[list(map(pickTerms, pred.columns))]
        wnd_sqr = pred[wndTerms]**2
        wnd_cbd = pred[wndTerms]**3
        pred = pd.concat([pred, wnd_sqr, wnd_cbd], axis = 1)

        #standardize predictor data (zero mean, unit variance per column)
        dat = pred.iloc[:,1:]
        scaler = StandardScaler()
        print(scaler.fit(dat))
        dat_standardized = pd.DataFrame(scaler.transform(dat), \
                                        columns = dat.columns)
        pred_standardized = pd.concat([pred['date'], dat_standardized], axis = 1)

        #load surge data (same file name, different directory)
        os.chdir(surge_path)
        surge = pd.read_csv(tg_name)
        surge.drop('Unnamed: 0', axis = 1, inplace = True)

        #remove duplicated surge rows
        surge.drop(surge[surge['date'].duplicated()].index, axis = 0, inplace = True)
        surge.reset_index(inplace = True)
        surge.drop('index', axis = 1, inplace = True)

        #adjust surge time format to match that of pred (keep date part only)
        time_str = lambda x: x.split()[0]
        surge_time = pd.DataFrame(list(map(time_str, surge['date'])), columns = ['date'])
        surge_new = pd.concat([surge_time, surge[['surge', 'lon', 'lat']]], axis = 1)

        #merge predictors and surge to find common time frame
        pred_surge = pd.merge(pred_standardized, surge_new.iloc[:,:2], on='date', how='right')
        pred_surge.sort_values(by = 'date', inplace = True)

        #find rows that have nans and remove them
        row_nan = pred_surge[pred_surge.isna().any(axis =1)]
        pred_surge.drop(row_nan.index, axis = 0, inplace = True)
        pred_surge.reset_index(inplace = True)
        pred_surge.drop('index', axis = 1, inplace = True)

        #in case pred and surge don't overlap, skip this gauge
        if pred_surge.shape[0] == 0:
            print('-'*80)
            print('Predictors and Surge don''t overlap')
            print('-'*80)
            continue

        #prepare data for training/testing
        X = pred_surge.iloc[:,1:-1]
        y = pd.DataFrame(pred_surge['surge'])
        y = y.reset_index()
        y.drop(['index'], axis = 1, inplace = True)

        #apply PCA, keeping components explaining 95% of the variance
        pca = PCA(.95)
        pca.fit(X)
        X_pca = pca.transform(X)
        num_pc = X_pca.shape[1] #number of principal components
        longitude = surge['lon'][0]
        latitude = surge['lat'][0]

        #surge reconstruction: use every predictor row without NaNs
        pred_for_recon = pred[~pred.isna().any(axis = 1)]
        pred_for_recon = pred_for_recon.reset_index().drop('index', axis = 1)

        #standardize predictor data (refit on the full reconstruction set)
        dat = pred_for_recon.iloc[:,1:]
        scaler = StandardScaler()
        print(scaler.fit(dat))
        dat_standardized = pd.DataFrame(scaler.transform(dat), \
                                        columns = dat.columns)
        pred_standardized = pd.concat([pred_for_recon['date'], dat_standardized], axis = 1)

        X_recon = pred_standardized.iloc[:, 1:]

        #apply PCA
        pca = PCA(num_pc) #use the same number of PCs used for training
        pca.fit(X_recon)
        X_pca_recon = pca.transform(X_recon)

        #model preparation
        #first train model using observed surge and corresponding predictors
        X_pca = sm.add_constant(X_pca)
        est = sm.OLS(y['surge'], X_pca).fit()

        #predict with X_recon and get 95% prediction interval
        X_pca_recon = sm.add_constant(X_pca_recon)
        predictions = est.get_prediction(X_pca_recon).summary_frame(alpha = 0.05)

        #drop confidence interval and mean_se columns, keep prediction interval
        predictions.drop(['mean_se', 'mean_ci_lower','mean_ci_upper'], \
                         axis = 1, inplace = True)

        #final dataframe: date, reconstruction, interval bounds, coordinates
        final_dat = pd.concat([pred_standardized['date'], predictions], axis = 1)
        final_dat['lon'] = longitude
        final_dat['lat'] = latitude
        final_dat.columns = ['date', 'surge_reconsturcted', 'pred_int_lower',\
                             'pred_int_upper', 'lon', 'lat']

        #save df as cs - in case of interruption
        os.chdir(dir_out)
        final_dat.to_csv(tg_name)

        #cd to dir_in
        os.chdir(dir_in)
| [
"michaelg.tadesse@gmail.com"
] | michaelg.tadesse@gmail.com |
794a23bd4671a46dd35ba0ad4dcfad56898b3c42 | c6f493d0955803d58dc41f0a4b12847f1ce83712 | /py_journal_fielded_retrieval/queryTuneBoost_Clef2016.py | 4ccab369dd4e9c3ff675eee2a8e919680cdb4e09 | [] | no_license | ielab/field-based-retrieval | 7b94c0feb5cd0de25087cb7bb94c055b286257db | dd54785473af6fc70fd2242c4fc64323bcb1e8c2 | refs/heads/master | 2020-11-28T02:24:09.515215 | 2019-12-23T05:00:53 | 2019-12-23T05:00:53 | 229,679,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,648 | py | import time
import os
import re
from lxml import etree
from elasticsearch import Elasticsearch
from queryBuilder_normal import query_builder_normal
import multiprocessing
from functools import partial
# server setting
es = Elasticsearch(urls='http://localhost', port=9200, timeout=500)
queryFile = '/volumes/ext/data/clef2016/queries2016.xml'
topPath = '/volumes/ext/jimmy/experiments/ipm_fielded_retrieval/data/'
topPrefix = "topTuneBoost_Clef2016"
tieBreakers = [1]
b = 0.75
k = 1.2
if not os.path.exists(topPath):
os.makedirs(topPath)
# Index Setting
docType = "clueweb"
indexName = "clueweb12b_all"
# load queries
tree = etree.parse(queryFile)
topics = tree.getroot()
queries = []
for topic in topics.iter("query"):
for detail in topic:
if detail.tag == "id":
queryNumber = detail.text
elif detail.tag == "title":
queryTerms = detail.text
#print queryTerms
queryTerms = re.sub(r'([^\s\w]|_)+', ' ', queryTerms)
#print queryTerms
if queryNumber == "129005":
queryTerms = "craving salt full body spasm need 12 hrs sleep can t maintain body temperature"
queryData = {"queryNumber": queryNumber, "queryTerms": queryTerms}
queries.append(queryData)
#print queryNumber + " >> " + queryTerms
startTime = time.time()
indexLap = time.time()
def es_search(p_alpha, p_tie, p_query):
    """Run one query against Elasticsearch and return TREC run-file lines.

    p_alpha and p_tie are forwarded to the fielded query builder; every hit
    becomes one line: "<qid> 0 <docid> <rank> <score> <indexName>".
    """
    print "processing alpha: " + str(p_alpha) + " tie: " + str(p_tie) + " qNum: " + p_query["queryNumber"]
    # Fresh client per call: this runs inside multiprocessing pool workers,
    # which cannot share the module-level connection object.
    es1 = Elasticsearch(urls='http://localhost', port=9200, timeout=500)
    res1 = es1.search(index=indexName, doc_type=docType,
                      body=query_builder_normal(p_query["queryTerms"], p_alpha, p_tie))
    rank = 1
    resultstring = ""
    for hit in res1['hits']['hits']:
        resultstring = resultstring + p_query["queryNumber"] + " 0 " + str(hit["_id"]) + " " + str(rank) + " " + \
                       str(hit["_score"]) + " " + indexName + "\n"
        rank += 1
    return resultstring
# Flushing before closing
rs = es.indices.flush(index=indexName)
print("Flushing - response: '%s'" % rs)

# Clear Cache
rs = es.indices.clear_cache(index=indexName)
print("Clearing Cache - response: '%s'" % rs)

# Closing the index, required before changing the index setting
rs = es.indices.close(index=indexName)
print("Closed - response: '%s'" % rs)

# Setting the index: apply the BM25 (b, k1) parameters to both
# per-field similarities.
request_body = {
    "settings": {
        "similarity": {
            "sim_title": {
                "type": "BM25",
                "b": b,
                "k1": k
            },
            "sim_body": {
                "type": "BM25",
                "b": b,
                "k1": k
            }
        }
    }
}
es.indices.put_settings(index=indexName, body=request_body)
# print("Configured - response: '%s'" % res)

# reopen index after configure the bm25 parameter
es.indices.open(index=indexName)
es.cluster.health(index=indexName, wait_for_status='green')  # wait until index ready
print("Opened index {0} --> b: {1}, k1: {2}".format(indexName, b, k))

# Sweep alpha over 0.0..1.0 in steps of 0.1; queries are fanned out over a
# process pool and each (alpha, tie) setting produces one TREC run file.
for t_alpha in xrange(0, 11, 1):
    for tie in tieBreakers:
        weights = "_alpha" + str(t_alpha)
        fw = open(topPath + topPrefix + weights, 'w')
        p = multiprocessing.Pool()
        # Bind the sweep parameters; workers receive only the query dict.
        func = partial(es_search, float(t_alpha)/10, tie)
        results = p.map(func, queries)
        p.close()
        p.join()
        for res in results:
            fw.write(res)
        print ("Scheme: {0} Completed, Duration: {1} seconds".format(weights, time.time() - indexLap))
        indexLap = time.time()
        fw.close()

print (" Duration ", time.time()-startTime)
| [
"g.zuccon@uq.edu.au"
] | g.zuccon@uq.edu.au |
b1b7bb8fdb8895c24c93c14eadebe639841536e2 | 7c45efb5a5c66305d7c4ba8994d3b077612df109 | /main/apps/first_app/models.py | 11cbbd145249e0e96de86f8b685139e3f1367e49 | [] | no_license | globedasher/django-dojo | c245f35b276402b6df6205a8612deb0089d34612 | dc27d289b8986b4fb910ef42f7bf483c385a3b4e | refs/heads/master | 2020-07-30T04:04:02.054414 | 2018-05-01T04:32:05 | 2018-05-01T04:32:05 | 73,635,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 928 | py | from __future__ import unicode_literals
from django.db import models
# Create your models here.
class Users(models.Model):
    """A registered user account.

    NOTE(review): the password is a plain CharField; no hashing is visible
    at the model level.
    """
    first_name = models.CharField(max_length=30)
    last_name = models.CharField(max_length=30)
    email = models.EmailField(max_length=100)
    password = models.CharField(max_length=100)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
class Messages(models.Model):
    """A message posted by a user."""
    message = models.TextField(max_length=1000)
    user_id = models.ForeignKey(Users)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
class Comments(models.Model):
    """A comment attached to a message, written by a user."""
    message_id = models.ForeignKey(Messages)
    user_id = models.ForeignKey(Users)
    comment = models.TextField(max_length=1000)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
| [
"globe.dasher@gmail.com"
] | globe.dasher@gmail.com |
745ef8ec39cce58059ef0970bd2a1901a4a89445 | 91065c2ec8083982c774deba85918ce39951d310 | /pywind/lib/configfile.py | 62b38a92e5cd827a807a249e744d15cf2b3bf9d7 | [
"BSD-2-Clause"
] | permissive | xyz12810/fdslight | 4e25a2e86aacbec72fd621984fe47d98ebf1e2de | 3ceb751d81da90ed43d78e8499a3a4e53b70792e | refs/heads/master | 2020-05-30T15:47:02.502115 | 2019-05-31T16:53:06 | 2019-05-31T16:53:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,786 | py | #!/usr/bin/env python3
class IniFileFmtErr(Exception): pass
class _Iniparser(object):
def __drop_comment(self, seq):
"""去除注释"""
line_n = 0
rs = []
for s in seq:
line_n += 1
# 去除空行
t = s.rstrip()
t = t.replace("\t", "")
if not t: continue
if t[0] == " ": raise IniFileFmtErr(s)
if t[0] == "=": raise IniFileFmtErr(s)
if t[0] == ";": continue
if t[0] == "#": continue
rs.append(t)
return rs
def __get_key_val(self, s):
pos = s.find("=")
if pos < 1: return None
name = s[0:pos].rstrip()
pos += 1
value = s[pos:].lstrip()
return (name, value,)
def __get_result(self, seq):
result = {}
name = ""
for s in seq:
s = s.rstrip()
if s[0] == "[":
s = s.replace("[", "")
s = s.replace("]", "")
name = s
continue
rs = self.__get_key_val(s)
if not rs: continue
k, v = rs
if name not in result: result[name] = {}
result[name][k] = v
return result
def __split(self, sts):
"""对数据进行分割"""
sts = sts.replace("\r", '')
seq = sts.split("\n")
return seq
def parse(self, sts):
seq = self.__split(sts)
seq = self.__drop_comment(seq)
result = self.__get_result(seq)
return result
def ini_parse_from_file(fpath):
    """Read *fpath* and parse it as INI text; returns {section: {key: value}}."""
    with open(fpath, "r") as f:
        data = f.read()
    p = _Iniparser()
    return p.parse(data)
def ini_parse_from_sts(sts):
    """Parse INI-formatted text *sts*; returns {section: {key: value}}."""
    return _Iniparser().parse(sts)
| [
"freekai@outlook.com"
] | freekai@outlook.com |
52509d7f84fa6af645fc378bda7e6f1e51793056 | f3b233e5053e28fa95c549017bd75a30456eb50c | /tyk2_input/47/47-55_MD_NVT_rerun/set_1ns_equi_1_m.py | 7132c58751f5bf0ee7f44b660e344af164606a52 | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 935 | py | import os
# Root of this TI window (ligand transformation 47 -> 55); template input
# files live under files/ and each lambda gets its own working directory.
dir = '/mnt/scratch/songlin3/run/tyk2/L47/MD_NVT_rerun/ti_one-step/47_55/'
filesdir = dir + 'files/'
temp_equiin = filesdir + 'temp_equi_1_m.in'
temp_pbs = filesdir + 'temp_1ns_equi_1_m.pbs'

# The 12-point lambda schedule for thermodynamic integration.
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]

for j in lambd:
    # Recreate a clean working directory named after the lambda value.
    os.system("rm -r %6.5f" %(j))
    os.system("mkdir %6.5f" %(j))
    os.chdir("%6.5f" %(j))
    os.system("rm *")
    workdir = dir + "%6.5f" %(j) + '/'
    #equiin: instantiate the MD input template (XXX -> lambda value)
    eqin = workdir + "%6.5f_equi_1_m.in" %(j)
    os.system("cp %s %s" %(temp_equiin, eqin))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, eqin))
    #PBS: instantiate the batch-script template likewise
    pbs = workdir + "%6.5f_1ns_equi_1_m.pbs" %(j)
    os.system("cp %s %s" %(temp_pbs, pbs))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
    #top: copy the merged topology and the restart from the previous stage
    os.system("cp ../47-55_merged.prmtop .")
    os.system("cp ../0.5_equi_0_3.rst .")
    #submit pbs
    os.system("qsub %s" %(pbs))
    os.chdir(dir)
| [
"songlin3@msu.edu"
] | songlin3@msu.edu |
22cb921734eed9e527220f9cf2f2cfa132ddb79f | faa83d63a23aec7c4f45c6ce6d140985a9fb2d50 | /package/diana/utils/gateways/persistent_map.py | 1c6b8457cd8e1d83d78429c28d9b53f4b79e86bf | [
"LicenseRef-scancode-other-permissive",
"MIT"
] | permissive | thomasyi17/diana2 | dbf23382f5f84bd9cf86ce531f46452f0083e7f6 | 983e58ef0a5fe0d820a56c41c823369754019171 | refs/heads/master | 2023-03-24T15:13:29.421614 | 2022-06-12T21:42:28 | 2022-06-12T21:42:28 | 167,248,482 | 0 | 0 | MIT | 2019-06-25T19:41:36 | 2019-01-23T20:22:50 | Python | UTF-8 | Python | false | false | 5,207 | py | import pickle, logging, csv, hashlib, os, glob
from multiprocessing import Queue
import attr
from abc import ABC
import time
def md5_digest(value):
    """Hex MD5 digest of *value* (a str); used to normalise map keys."""
    encoded = value.encode("utf8")
    return hashlib.md5(encoded).hexdigest()
@attr.s
class PersistentMap(ABC):
    """Abstract key/value store persisted to a file.

    Keys are normalised through ``keyhash_func`` (MD5 hex by default)
    before storage; subclasses implement ``read_data``/``write_data``.
    """

    keyhash_func = attr.ib( default=md5_digest )
    fn = attr.ib( type=str, default=None )
    # Keys already written during this process; lets `put` skip the disk
    # round-trip for repeated keys when early_exit is on.
    observed_keys = attr.ib( init=False, factory=set)

    def clear(self):
        """Remove the backing file entirely."""
        if os.path.isfile(self.fn):
            os.remove(self.fn)

    def put(self, key, item, early_exit=True):
        """Store *item* under *key*; with early_exit, skip keys already present."""
        logger = logging.getLogger("PMap")
        if self.keyhash_func:
            key = self.keyhash_func(key)
        logger.debug("Adding to pmap")
        if early_exit and (key in self.observed_keys):
            logger.debug("Item already exists in pmap (from observed), skipping")
            return
        data = self.read_data(key)
        if early_exit:
            # Remember every key already on disk so later puts can skip
            # the file read.
            for _key in data.keys():
                self.observed_keys.add(_key)
        if early_exit and (key in data.keys()):
            logger.debug("Item already exists in pmap (read file), skipping")
            return
        logger.debug("Adding item to key")
        data[key] = item
        self.write_data(data, key)

    def get(self, key):
        """Return the stored item for *key*, or None if absent."""
        logger = logging.getLogger("PMap")
        logger.debug("Retrieving from pmap")
        if self.keyhash_func:
            key = self.keyhash_func(key)
        data = self.read_data(key)
        return data.get(key)

    def read_data(self, key):
        # Subclass hook: load the mapping that may contain *key*.
        # BUGFIX: `raise NotImplemented` raises a TypeError in Python 3
        # (NotImplemented is a value, not an exception class); the correct
        # exception is NotImplementedError.
        raise NotImplementedError

    def write_data(self, data, key):
        # Subclass hook: persist *data* (the mapping containing *key*).
        raise NotImplementedError

    def run(self, queue, early_exit=True):
        """Drain (key, item) pairs from *queue* forever, persisting each."""
        logger = logging.getLogger("PMap")
        while True:
            # logger.debug("Checking queue: {}".format(
            #     "Empty" if queue.empty() else "Pending"))
            if not queue.empty():
                key, item = queue.get(False)
                logger.debug("Found ({}, {})".format(key, item))
                self.put(key, item, early_exit=early_exit)
            time.sleep(0.01)
@attr.s
class PicklePMap(PersistentMap):
    """PersistentMap backed by a single pickle file."""

    fn = attr.ib( type=str, default="/tmp/cache.pkl" )

    def read_data(self, *args, **kwargs):
        # A missing file means an empty map.
        if not os.path.isfile(self.fn):
            return {}
        with open(self.fn, "rb") as f:
            data = pickle.load(f)
        # logging.debug("READING PKL: {}".format(data))
        return data

    def write_data(self, data, *args, **kwargs):
        # Rewrites the whole mapping on every put.
        with open(self.fn, "wb") as f:
            pickle.dump(data, f)
        # logging.debug("WRITING PKL: {}".format(data))
@attr.s
class CSVPMap(PersistentMap):
    """PersistentMap backed by a single CSV file: one row per key, with
    each stored value being a dict of column -> cell."""

    fn = attr.ib( type=str, default="/tmp/cache.csv" )
    # Column that holds the (hashed) key for each row.
    keyfield = attr.ib( default="_key" )
    # Optional fixed column order; inferred from the data when None.
    fieldnames = attr.ib( default=None )

    def read_data(self, *args, **kwargs):
        if not os.path.isfile(self.fn):
            return {}
        with open(self.fn, "r") as f:
            data = {}
            reader = csv.DictReader(f)
            for row in reader:
                # Pull the key column out; the rest of the row is the value.
                _key = row.pop(self.keyfield)
                data[_key] = {**row}
        # logging.debug("READING CSV: {}".format(data))
        return data

    def write_data(self, data, *args, **kwargs):
        # Re-flatten the mapping into rows, restoring the key column.
        rows = []
        for k,v in data.items():
            _row = v
            _row[self.keyfield] = k
            rows.append(_row)
        if self.fieldnames:
            fieldnames = self.fieldnames
        else:
            # Infer the header as the union of all keys seen in any row.
            _fieldnames = set()
            for item in rows:
                for k in item.keys():
                    _fieldnames.add(k)
            fieldnames = list(_fieldnames)
        with open(self.fn, "w") as f:
            writer = csv.DictWriter(f, fieldnames=fieldnames, extrasaction="ignore")
            writer.writeheader()
            writer.writerows(rows)
        # logging.debug("WRITING CSV: {}".format(data))
@attr.s
class ArrayPMapMixin(PersistentMap):
    """Shards a PersistentMap across many files, keyed by a hash prefix."""

    prefix_len = attr.ib(default=2) # 16*16 = 256 files for a hexdigest
    # Cache of prefix -> backend instance, built lazily.
    backends = attr.ib(init=False, factory=dict)

    def clear(self):
        # Clear every shard created so far.
        for be in self.backends.values():
            be.clear()

    def mk_backend(self) -> PersistentMap:
        # Unused hook; backends are actually built in backend_for via
        # isinstance dispatch below.
        raise NotImplementedError

    def backend_for(self, key):
        """Return (creating on first use) the shard backend for *key*."""
        be_key = key[0:self.prefix_len]
        be = self.backends.get(be_key)
        if not be:
            if self.fn:
                # The fn template contains '{}' for the shard prefix.
                fn = self.fn.format(be_key)
            else:
                fn = None
            # Dispatch on the concrete storage class mixed in with us.
            if isinstance(self, PicklePMap):
                be = PicklePMap(fn=fn)
            elif isinstance(self, CSVPMap):
                be = CSVPMap(fn=fn, keyfield=self.keyfield, fieldnames=self.fieldnames)
            self.backends[be_key] = be
        return be

    def read_data(self, key, *args, **kwargs):
        be = self.backend_for(key)
        data = be.read_data()
        return data

    def write_data(self, data, key, *args, **kwargs):
        be = self.backend_for(key)
        be.write_data(data)
@attr.s
class PickleArrayPMap(ArrayPMapMixin, PicklePMap):
    """Sharded pickle store; '{}' in fn is filled with the key prefix."""
    fn = attr.ib( type=str, default="/tmp/cache-{}.pkl" )
@attr.s
class CSVArrayPMap(ArrayPMapMixin, CSVPMap):
    """Sharded CSV store; '{}' in fn is filled with the key prefix."""
    fn = attr.ib( type=str, default="/tmp/cache-{}.csv" )
| [
"derek_merck@brown.edu"
] | derek_merck@brown.edu |
8404702afa848659ee2f70557d4dbd6a675c9257 | 31134be1044464e10d9f241a2ab235aa2daf9a9e | /iris_plot/main.py | 0f02fc922317896e5f0e8dbd3b8a9ae66e0e30c8 | [] | no_license | amitsaha/conda-kapsel-iris | 10d7d532514211651edb111b8fb21e543e6f4213 | b6df81ed5ec01e0c05cb9de963950509000fa06b | refs/heads/master | 2021-01-20T20:27:55.366708 | 2016-07-24T10:34:28 | 2016-07-24T10:34:28 | 64,060,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 545 | py | import os
import pandas as pd
from bokeh.plotting import Figure
from bokeh.io import curdoc
# Path to the iris CSV comes from the environment (presumably set by the
# conda-kapsel launcher -- confirm against the project config).
iris_csv = os.getenv("IRIS_CSV")
flowers = pd.read_csv(iris_csv)

# One fixed colour per species, mapped row-by-row.
colormap = {'setosa': 'red', 'versicolor': 'green', 'virginica': 'blue'}
colors = [colormap[x] for x in flowers['species']]

p = Figure(title = "Iris Morphology")
p.xaxis.axis_label = 'Petal Length'
p.yaxis.axis_label = 'Petal Width'
# Scatter petal length vs width, coloured by species.
p.circle(flowers["petal_length"], flowers["petal_width"],color=colors, fill_alpha=0.2, size=10)

# Register the plot with the active Bokeh document (served app).
curdoc().title = "Iris Example"
curdoc().add_root(p)
| [
"amitsaha.in@gmail.com"
] | amitsaha.in@gmail.com |
497e0341d11ab5aad2ce2f6a2126edfc9098569d | bc162308dc046e682f812da63b10ed051003f18f | /tests/eth2/fixtures/helpers.py | 9deac2e3958dd5ca8999bfb49989eec45d1c0129 | [
"MIT"
] | permissive | davesque/trinity | 23db37dc8299b5bb0d352f98e82f0912e0bce7ed | 70b482af4235eb835c90b3c2471be2951b494afe | refs/heads/master | 2020-04-18T04:48:48.411902 | 2019-08-07T16:01:33 | 2019-08-07T16:01:33 | 167,252,958 | 0 | 0 | null | 2019-01-23T20:56:57 | 2019-01-23T20:56:57 | null | UTF-8 | Python | false | false | 1,799 | py | import pytest
from eth_utils import (
to_tuple,
)
from eth2.configs import (
Eth2GenesisConfig,
)
from eth2.beacon.db.chain import BeaconChainDB
from eth2.beacon.state_machines.forks.serenity import (
SerenityStateMachine,
)
from eth2.beacon.tools.fixtures.loading import (
get_all_test_files,
)
#
# pytest setting
#
def bls_setting_mark_fn(bls_setting):
    """Return the pytest mark for a case's BLS setting, or None if unmarked."""
    if bls_setting:
        return pytest.mark.noautofixture
    return None
@to_tuple
def get_test_cases(root_project_dir, fixture_pathes, config_names, parse_test_case_fn):
    """Yield one marked pytest param per test case found in the fixtures."""
    # TODO: batch reading files
    test_files = get_all_test_files(
        root_project_dir,
        fixture_pathes,
        config_names,
        parse_test_case_fn,
    )
    # Flatten files into cases, marking each per its BLS setting.
    for test_file in test_files:
        for test_case in test_file.test_cases:
            bls_setting = test_case.bls_setting if hasattr(test_case, 'bls_setting') else False
            yield mark_test_case(test_file, test_case, bls_setting=bls_setting)
def get_test_id(test_file, test_case):
    """Build a unique pytest id: ``<file_name>:<index>:<description>``."""
    description = getattr(test_case, 'description', '')
    parts = (test_file.file_name, test_case.index, description)
    return "{}:{}:{}".format(*parts)
def mark_test_case(test_file, test_case, bls_setting=False):
    """Wrap a test case in a pytest.param, marked when BLS is required."""
    test_id = get_test_id(test_file, test_case)
    mark = bls_setting_mark_fn(bls_setting)
    if mark:
        return pytest.param(test_case, test_file.config, id=test_id, marks=(mark,))
    else:
        return pytest.param(test_case, test_file.config, id=test_id)
#
# State execution
#
def get_sm_class_of_config(config):
    """Build a Serenity state-machine subclass bound to *config*."""
    return SerenityStateMachine.configure(
        __name__='SerenityStateMachineForTesting',
        config=config,
    )
def get_chaindb_of_config(base_db, config):
    """Wrap *base_db* in a BeaconChainDB using a genesis config built from *config*."""
    return BeaconChainDB(base_db, Eth2GenesisConfig(config))
| [
"hwwang156@gmail.com"
] | hwwang156@gmail.com |
23826a0a7381a9ac7265e286222b48e18f806b93 | 411eff94020c192d5e5f657fa6012232ab1d051c | /game/tests/anim_test2.py | 4e36276699b967e0cb864ee480f7dc889dd5ddfb | [] | no_license | xMakerx/cio-src | 48c9efe7f9a1bbf619a4c95a4198aaace78b8491 | 60b2bdf2c4a24d506101fdab1f51752d0d1861f8 | refs/heads/master | 2023-02-14T03:12:51.042106 | 2021-01-15T14:02:10 | 2021-01-15T14:02:10 | 328,268,776 | 1 | 0 | null | 2021-01-15T15:15:35 | 2021-01-09T23:51:37 | Python | UTF-8 | Python | false | false | 187 | py | from src.coginvasion.standalone.StandaloneToon import *
don = loader.loadModel("phase_4/models/char/daisyduck_1600.bam")
don.reparentTo(render)
don.place()
base.startDirect()
base.run() | [
"brianlach72@gmail.com"
] | brianlach72@gmail.com |
c04d0d2bd175dc68572322d6021223cdd88a5d0c | 78a15793be1ba71ea7eecee33abef4ecbe11d8f2 | /apps/chat/urls.py | 0067ab83ee65d720cb56865946edd94bd654f009 | [] | no_license | teresaylin/my2009 | f5df9c62492d4c88931f6aa45af31ee88dbe3a1a | 2486750ad73df313d596497b0eb7f4c47518e6a6 | refs/heads/master | 2021-03-21T23:53:55.581074 | 2016-06-01T18:13:44 | 2016-06-01T18:13:44 | 23,392,283 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | from django.conf.urls import patterns, include, url
from rest_framework import routers
from .views import *
# REST router exposes the chat viewsets at /rooms, /messages, /room-users.
router = routers.DefaultRouter()
router.register(r'rooms', RoomViewSet)
router.register(r'messages', RoomMessageViewSet)
router.register(r'room-users', RoomUserViewSet)

urlpatterns = [
    url(r'', include(router.urls)),
    # Per-room message listing, addressed by room name (slug).
    url(r'^rooms/(?P<roomName>[-\w]+)/messages/$', RoomMessagesView.as_view(), name='room-messages'),
]
| [
"jam.hann@gmail.com"
] | jam.hann@gmail.com |
273ed8558200ea611b094077fa0425248208eec7 | 83d9b630bdc5535d67e35d69768b4d41fc4c9653 | /assignment1/assignment1/cs231n/features.py | ca2f313c8774c4c166eedebf60412234b300f9a1 | [] | no_license | chenshaobin/assignment_CS231n | 2c8820f748fca6fb41cdb272a81c940f8c0a0e52 | 132c670d22dd37e6b4c1bd9da944c1f62a639d64 | refs/heads/main | 2022-12-30T21:05:12.500255 | 2020-10-18T04:49:40 | 2020-10-18T04:49:40 | 301,309,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,526 | py | from __future__ import print_function
from builtins import zip
from builtins import range
from past.builtins import xrange
import matplotlib
import numpy as np
from scipy.ndimage import uniform_filter
def extract_features(imgs, feature_fns, verbose=False):
    """
    Given pixel data for images and several feature functions that can operate on
    single images, apply all feature functions to all images, concatenating the
    feature vectors for each image and storing the features for all images in
    a single matrix.

    Inputs:
    - imgs: N x H X W X C array of pixel data for N images.
    - feature_fns: List of k feature functions. The ith feature function should
      take as input an H x W x D array and return a (one-dimensional) array of
      length F_i.
    - verbose: Boolean; if true, print progress.

    Returns:
    An array of shape (N, F_1 + ... + F_k) where each column is the concatenation
    of all features for a single image.
    """
    num_images = imgs.shape[0]
    if num_images == 0:
        return np.array([])

    # Probe the first image to learn each feature's output length.
    feature_dims = []
    first_image_features = []
    for fn in feature_fns:
        feats = fn(imgs[0].squeeze())
        assert len(feats.shape) == 1, 'Feature functions must be one-dimensional'
        feature_dims.append(feats.size)
        first_image_features.append(feats)

    # Allocate one big matrix: one row per image, features side by side.
    imgs_features = np.zeros((num_images, sum(feature_dims)))
    imgs_features[0] = np.hstack(first_image_features).T

    # Fill the remaining rows, writing each feature into its column slice.
    for i in range(1, num_images):
        offset = 0
        for fn, dim in zip(feature_fns, feature_dims):
            imgs_features[i, offset:offset + dim] = fn(imgs[i].squeeze())
            offset += dim
        if verbose and i % 1000 == 999:
            print('Done extracting features for %d / %d images' % (i+1, num_images))

    return imgs_features
def rgb2gray(rgb):
    """Convert RGB image to grayscale.

    Uses the ITU-R BT.601 luma weights 0.299 R + 0.587 G + 0.114 B.
    (The previous code used 0.144 for the blue channel — a digit
    transposition of the standard 0.114 coefficient, which made the
    weights sum to 1.03 instead of 1.0.)

    Parameters:
      rgb : RGB image; the last axis holds (at least) the R, G, B channels

    Returns:
      gray : grayscale image with the channel axis reduced away
    """
    return np.dot(rgb[..., :3], [0.299, 0.587, 0.114])
def hog_feature(im):
    """Compute Histogram of Gradient (HOG) feature for an image.

    Modified from skimage.feature.hog
    http://pydoc.net/Python/scikits-image/0.4.2/skimage.feature.hog

    Reference:
      Histograms of Oriented Gradients for Human Detection
      Navneet Dalal and Bill Triggs, CVPR 2005

    Parameters:
      im : an input grayscale or rgb image

    Returns:
      feat: 1D Histogram of Gradient (HOG) feature of length
            floor(H/8) * floor(W/8) * 9
    """
    # convert rgb to grayscale if needed
    if im.ndim == 3:
        image = rgb2gray(im)
    else:
        # BUGFIX: the original called np.at_least_2d, which does not exist
        # (AttributeError on every grayscale input); the correct NumPy name
        # is np.atleast_2d.
        image = np.atleast_2d(im)

    sx, sy = image.shape  # image size
    orientations = 9      # number of gradient bins
    cx, cy = (8, 8)       # pixels per cell

    # Finite-difference gradients; the last column/row of gx/gy stays 0.
    gx = np.zeros(image.shape)
    gy = np.zeros(image.shape)
    gx[:, :-1] = np.diff(image, n=1, axis=1)  # compute gradient on x-direction
    gy[:-1, :] = np.diff(image, n=1, axis=0)  # compute gradient on y-direction
    grad_mag = np.sqrt(gx ** 2 + gy ** 2)     # gradient magnitude
    # Orientation mapped to [0, 180); the 1e-15 avoids atan2(0, 0).
    grad_ori = np.arctan2(gy, (gx + 1e-15)) * (180 / np.pi) + 90

    n_cellsx = int(np.floor(sx / cx))  # number of cells in x
    n_cellsy = int(np.floor(sy / cy))  # number of cells in y
    # compute orientations integral images
    orientation_histogram = np.zeros((n_cellsx, n_cellsy, orientations))
    for i in range(orientations):
        # isolate pixels whose orientation falls inside the ith angular bin
        temp_ori = np.where(grad_ori < 180 / orientations * (i + 1),
                            grad_ori, 0)
        temp_ori = np.where(grad_ori >= 180 / orientations * i,
                            temp_ori, 0)
        # select magnitudes for those orientations
        cond2 = temp_ori > 0
        temp_mag = np.where(cond2, grad_mag, 0)
        # cell-average the magnitudes, then sample one value per cell center
        orientation_histogram[:, :, i] = uniform_filter(temp_mag, size=(cx, cy))[round(cx / 2)::cx, round(cy / 2)::cy].T

    return orientation_histogram.ravel()
def color_histogram_hsv(im, nbin=10, xmin=0, xmax=255, normalized=True):
    """
    Compute color histogram for an image using hue.

    Inputs:
    - im: H x W x C array of pixel data for an RGB image.
    - nbin: Number of histogram bins. (default: 10)
    - xmin: Minimum pixel value (default: 0)
    - xmax: Maximum pixel value (default: 255)
    - normalized: Whether to normalize the histogram (default: True)

    Returns:
    1D vector of length nbin giving the color histogram over the hue of the
    input image.
    """
    bins = np.linspace(xmin, xmax, nbin + 1)
    # matplotlib.colors.rgb_to_hsv expects inputs in [0, 1], so scale the
    # image down before converting and rescale the result back to [0, xmax].
    hsv = matplotlib.colors.rgb_to_hsv(im / xmax) * xmax
    # Histogram over the hue channel (channel 0 of the HSV image).
    imhist, bin_edges = np.histogram(hsv[:, :, 0], bins=bins, density=normalized)
    # With density=True, multiplying by the bin widths converts the density
    # back into per-bin probability mass (the area under each bin).
    imhist = imhist * np.diff(bin_edges)

    # return histogram
    return imhist
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
| [
"13531194616@163.com"
] | 13531194616@163.com |
169b42cb29b0f9fc8d36773fed6c41cfb40f897f | 277290f8cd6cc5bcb77faaf69a045f5074a988e5 | /last-stone-weight-ii.py | 129b04ec8186644cc9d6e8b0eba35baf4b475185 | [] | no_license | shankarkrishnamurthy/problem-solving | aed0252d9ca6d6b51e9a7d8d5e648343b4abf322 | f9bc1db1cc99b10a87a2fa51869924aa10df4c99 | refs/heads/master | 2023-03-20T18:48:45.107058 | 2023-03-06T03:24:53 | 2023-03-06T03:24:53 | 123,035,515 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | class Solution(object):
def lastStoneWeightII(self, stones):
    """
    :type stones: List[int]
    :rtype: int

    Partition the stones into two groups; smashing them pairwise leaves a
    final stone of weight |total - 2 * subset_sum|, so the answer is that
    quantity minimized over all achievable subset sums.

    BUGFIX: the original added keys to `dp` while iterating `dp.keys()`,
    which works only in Python 2 (keys() returned a list copy) and raises
    RuntimeError ("dictionary changed size during iteration") in Python 3.
    A set of achievable sums, extended via a snapshot union, is safe in both.
    """
    total = sum(stones)
    reachable = {0}  # subset sums achievable so far
    for stone in stones:
        # Union with a freshly-built set: never mutate while iterating.
        reachable |= {stone + s for s in reachable}
    return min(abs(total - 2 * s) for s in reachable)
print Solution().lastStoneWeightII([2,1,4,1,8,7])
| [
"kshan_77@yahoo.com"
] | kshan_77@yahoo.com |
1b4d75da96eec721917ac7f7fc5a1d54f40132e1 | 3bd0a2e48ada5958f91b0deae3bef83920ecf770 | /Geometry/HcalAlgo/test/python/dumpHcalXtal_cfg.py | 04ff8eb310197111e9f4fd210dccdd4f37aabc5b | [
"Apache-2.0"
] | permissive | anmishra0/cmssw | 6716783874b48ca6866e6bd17d3756d169956101 | d920ba0d5e8b46fc1e2e3641d0aac122503e5811 | refs/heads/master | 2021-06-01T13:47:33.834689 | 2019-09-20T07:35:13 | 2019-09-20T07:35:13 | 209,823,961 | 1 | 0 | Apache-2.0 | 2019-09-20T15:31:35 | 2019-09-20T15:31:34 | null | UTF-8 | Python | false | false | 822 | py | import FWCore.ParameterSet.Config as cms
# cmsRun configuration: load the HcalXtal test geometry and dump the
# simulation geometry into a ROOT file.
process = cms.Process("DUMP")

# Detector description for the Hcal crystal test geometry.
process.load("Geometry.HcalAlgo.testGeomHcalXtal_cfi")

# Message logging: enable the Geant4 and Hcal-geometry message categories
# (guarded in case the MessageLogger service was not attached).
process.load('FWCore.MessageService.MessageLogger_cfi')
if 'MessageLogger' in process.__dict__:
    process.MessageLogger.categories.append('G4cerr')
    process.MessageLogger.categories.append('G4cout')
    process.MessageLogger.categories.append('HCalGeom')

# No input data is needed; run over a single empty event.
process.source = cms.Source("EmptySource")
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(1)
)

# Build the TGeo representation of the DDD geometry (traversed to level 14).
process.add_(cms.ESProducer("TGeoMgrFromDdd",
                            verbose = cms.untracked.bool(False),
                            level = cms.untracked.int32(14)
))

# Analyzer that writes the geometry into HcalXtal.root.
process.dump = cms.EDAnalyzer("DumpSimGeometry",
                              outputFileName = cms.untracked.string('HcalXtal.root')
)

# Schedule: just the dump step.
process.p = cms.Path(process.dump)
"sunanda.banerjee@cern.ch"
] | sunanda.banerjee@cern.ch |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.