blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a3166ac3d5064df4705a84f1e7ae3c94963ca5bd | b55801df5a6f4fe8abfbd0bb61b92906cbf2a510 | /pyLTM/pyltm/learner/mixed_clique_sufficient_statistics.py | 8a8e2f6c68b7fd2a1e8bb6f0072d30275d2902ec | [] | no_license | rezaarmand/ltvae-release | 51735fb12a33c685cb0198909355025cc84736f0 | 3a26e276e2a57363c0fb84cc1f8e492ff53bb5db | refs/heads/master | 2022-02-18T10:16:04.736113 | 2019-09-11T02:23:26 | 2019-09-11T02:23:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,416 | py | '''
Created on 12 Sep 2018
@author: Bryan
'''
from .sufficient_statistics import SufficientStatistics
from pyltm.model.potential.cgpotential import CGPotential
from pyltm.model import JointContinuousVariable, CGParameter
import collections
import numpy as np
from pyltm.model.parameter import cgparameter
from pyltm.model.variable.discrete_variable import DiscreteVariable
from pyltm.model.parameter.cptparameter import CPTParameter
from pyltm.util.utils import logsumexp
class MixedCliqueSufficientStatistics(SufficientStatistics):
    '''
    Sufficient statistics for a mixed (discrete + Gaussian) clique.

    Per discrete state i it stores:
      p[i]     - expected counts,
      mu[i]    - sufficient sum (count-weighted first moment),
      covar[i] - sufficient sum of squares (count-weighted second moment).
    '''
    def __init__(self, node, batch_size):
        '''
        node: Clique whose variables and potential seed the statistics.
        batch_size: number of samples the initial counts should represent.
        '''
        # Local import keeps this working on Python >= 3.10, where the
        # deprecated alias collections.Iterable was removed.
        from collections.abc import Iterable
        jointVariables = node.jointVariable
        discreteVariable = node.discreteVariable
        if isinstance(jointVariables, JointContinuousVariable):
            jointVariables = list(jointVariables.variables)
        elif isinstance(jointVariables, Iterable):
            jointVariables = list(jointVariables)
        assert isinstance(jointVariables, list)
        self._continuousVariables = jointVariables
        self._discreteVariable = discreteVariable
        self.resetParameters(node.potential, batch_size)
    def resetParameters(self, cliquepotential, batch_size):
        """Re-seed all statistics from a clique potential, scaled to batch_size counts."""
        cardinality = 1 if self._discreteVariable is None else self._discreteVariable.getCardinality()
        self.size = cardinality
        logp = cliquepotential.logp.copy() # (K, )
        logconstant = logsumexp(logp)
        self.p = np.exp(logp - logconstant) # normalize
        self.mu = cliquepotential.mu.copy() # (K, D)
        self.covar = cliquepotential.covar.copy() # (K, D, D)
        # self.normalize()
        self.p = self.p * batch_size # sufficient counts
        for i in range(cardinality):
            # sufficient sum_square: second moment scaled by the counts
            self.covar[i] = (self.covar[i] + np.outer(self.mu[i], self.mu[i])) * self.p[i]
            # sufficient sum
            self.mu[i] = self.mu[i] * self.p[i]
    def normalize(self, constant=None):
        """Divide the counts p by `constant` (defaults to their sum); return the divisor."""
        if constant is None:
            constant = np.sum(self.p)
        self.p /= constant
        return constant
    def reset(self):
        """Zero all accumulated statistics in place."""
        self.p[:] = 0
        self.mu[:] = 0
        self.covar[:] = 0
    def add(self, potential):
        '''potential: batched cliquepotential whose expected statistics are accumulated.'''
        batch_size = potential.logp.shape[0]
        # maybe normalize it in case hasn't been normalized
        logp = potential.logp - logsumexp(potential.logp, axis=1, keepdims=True)
        for i in range(potential.size):
            weight = np.expand_dims(np.exp(logp[:, i]), axis=1) # (N, 1)
            self.p[i] += np.sum(weight)
            self.mu[i] += np.sum(potential.mu[:, i, :] * weight, axis=0) # (N, D) x (N, 1)
            self.covar[i] += np.sum(np.concatenate([np.expand_dims(np.outer(potential.mu[j, i, :], potential.mu[j, i, :]) * weight[j], axis=0)
                                                    for j in range(batch_size)], axis=0), axis=0)
    def update(self, batchStatistics, learning_rate):
        """Stochastic update: move each statistic toward the batch statistics."""
        assert(self.size==batchStatistics.size)
        self.p[:] = self.p + learning_rate * (batchStatistics.p - self.p)
        self.mu[:] = self.mu + learning_rate * (batchStatistics.mu - self.mu)
        self.covar[:] = self.covar + learning_rate * (batchStatistics.covar - self.covar)
    def computePotential(self, variable, parent):
        """Convert accumulated statistics into parameters for `variable` (CG or CPT)."""
        if isinstance(variable, JointContinuousVariable):
            parameters = [None]*self.size
            for i in range(self.size):
                parameters[i] = CGParameter(1, len(self.mu[i]), self.computeMean(self.p[i], self.mu[i]),
                                            self.computeCovariance(self.p[i], self.mu[i], self.covar[i]))
            return parameters
        elif isinstance(variable, DiscreteVariable):
            # only possibility is that variable is root
            parameter = CPTParameter(self.size)
            parameter.prob[:] = self.p
            parameter.normalize()
            return parameter
    def computeMean(self, p, mu):
        """Mean = sufficient sum / counts; zeros when the counts are zero."""
        if p == 0:
            return np.zeros_like(mu)
        return mu / p
    def computeCovariance(self, p, mu, covar):
        """Covariance = E[xx^T] - mean mean^T; ones when counts are zero (degenerate fallback)."""
        if p==0:
            return np.ones_like(covar)
        mu = self.computeMean(p, mu)
        return covar / p - np.outer(mu, mu)
| [
"eelxpeng@gmail.com"
] | eelxpeng@gmail.com |
869e9c59cdc059f3f26e0773ffa1001df5e61962 | ad7e79fd9538dede52604603dde6a014a95ce425 | /urls.py | 805049b0f48b475a5e5771c4cffbfcfdad869490 | [] | no_license | coco-ty/Course_Registration_Automation | b66ebf3ac19a0a7e6d1d1aafeb2afa3350b936ab | 80fab09af70d67e74896f76f233601bd7b2077e2 | refs/heads/master | 2021-01-10T13:15:01.923534 | 2015-10-10T06:30:57 | 2015-10-10T06:30:57 | 43,997,091 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 661 | py | from django.conf.urls.defaults import *
# URL routing table for the `main` app, in the legacy (pre-Django 1.4)
# patterns() style: each tuple maps a URL regex to a view name that is
# resolved inside the `main.views` module.
urlpatterns = patterns('main.views',
    (r'^$', 'home'),
    (r'^login/$', 'login'),
    (r'^logout/$', 'logout'),
    (r'^process/?$', 'process'),
    (r'^authenticate/?$', 'authenticate'),
    (r'^rate/?$', 'rate'),
    # Listing routes plus detail routes whose named group is forwarded to the view.
    (r'^sites/$','sites'),
    (r'^sites/(?P<url>.+)/$','sites'),
    (r'^courses/$','courses'),
    (r'^courses/(?P<code>.+)/$','courses'),
    (r'^sections/(?P<id>.+)/$','sections'),
    (r'^classrooms/(?P<id>.+)/$','classrooms'),
    (r'^depts/$','department'),
    (r'^depts/(?P<deptcode>.+)/$','department'),
    (r'^books/$','textbook'),
    (r'^books/(?P<isbn>.+)/$','textbook'),
)
| [
"eliz.clair914@gmail.com"
] | eliz.clair914@gmail.com |
604d6590d6ef3fce8b32b65c1c5a36acd2fa899e | 4bbc78dfcb36bad90f4b5a4d1a1567bc820f22df | /Flaskweb1/venv/bin/pip3.6 | 6947d567b45a8cb1b6a17c1892873c702ec5d1ec | [] | no_license | Ernestbengula/python | 38d09dad271cbc966eca65804ea7bcee2c248915 | 168d385e3258cc16db811cf34af1b269d7f055c7 | refs/heads/master | 2020-07-24T03:34:18.863179 | 2019-10-30T12:34:21 | 2019-10-30T12:34:21 | 207,789,628 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | 6 | #!/root/PycharmProjects/Flaskweb1/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.6'
# Auto-generated setuptools console-script wrapper: dispatches to the pinned
# pip distribution installed in this virtualenv.
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip a trailing "-script.pyw" / ".exe" suffix from argv[0] so pip
    # reports a clean program name in its help/error output.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        # Resolve pip's console entry point, invoke it, and exit with its status.
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.6')()
    )
| [
"ernestbengula2017@gmail.com"
] | ernestbengula2017@gmail.com |
cf72bc593892466402f683cff40798c3ae48bb49 | 5c13b223102d2f7559f2855eb6b8716de0708638 | /pythonCollections/tuple/tuple.py | 3d197cc7541880a3cd8938b71f2598bf122184ab | [] | no_license | Akhilvijayanponmudy/pythondjangoluminar | c0ab8e9ea1f2ef1ba034732de15b479d9f7a24da | 1fc73790c530518f4a747ed6a4fea3bfbe27687e | refs/heads/master | 2023-04-01T18:37:35.112270 | 2021-04-01T03:24:08 | 2021-04-01T03:24:08 | 328,891,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | #define()
# Tuples can store values of different data types.
# Insertion order is preserved.
# Duplicate values are allowed.
# Tuple objects are immutable (they do not support item updates). | [
"akhilvijayanponmudy@gmail.com"
] | akhilvijayanponmudy@gmail.com |
48a939c169881e193fbae7563fa3afb68881c6e0 | f4c49fb03e1358c675f6d29ad265ac34fe8edee4 | /LeducPoker/LeducPokerGame.py | d0b91936149476f7e540124f8e3e6ad39683a9af | [] | no_license | mzktbyjc2016/nfsp-pytorch | 4b36afd0db8c8634ef71be5cf4b6b97fe814da3f | 125268908919661a508abc7bddc1015a92116f96 | refs/heads/master | 2020-06-25T09:25:20.115407 | 2019-03-03T02:46:49 | 2019-03-03T02:46:49 | 199,271,302 | 2 | 0 | null | 2019-07-28T10:09:19 | 2019-07-28T10:09:19 | null | UTF-8 | Python | false | false | 9,630 | py | from typing import Tuple, Optional, List
import random
import numpy as np
import copy
class PlayerActions:
    """Namespace for the three legal Leduc poker actions and their encodings."""
    FOLD = 0
    CHECK_CALL = 1
    BET_RAISE = 2
    # All actions, ordered by their integer encoding.
    ALL_ACTIONS = [FOLD, CHECK_CALL, BET_RAISE]
    # Single-character labels used when rendering betting histories.
    ACTION_TO_CHAR = dict(zip(ALL_ACTIONS, "fcr"))
class LeducNode(object):
    """Betting-history node for Leduc hold'em (two betting rounds).

    Holds one action tuple per round plus the public board card, and derives
    everything else (current round, legal actions, terminality, payoffs).
    Stakes visible in this code: ante of 1, bet size 2 in round 0 and 4 in
    round 1, at most two raises per round.
    """
    def __init__(
            self,
            bet_sequences: List[Tuple[PlayerActions]],
            board_card: Optional[int]):
        # One action sequence per betting round.
        assert len(bet_sequences) == 2
        self._bet_sequences = bet_sequences
        self.board_card = board_card
        if self.game_round == 1:
            # Once round-1 betting has started, the board card must be dealt.
            assert self.board_card is not None or len(self.bet_sequences[1]) == 0
    @property
    def game_round(self) -> int:
        """0 during the first betting round; 1 after it ends with a check/call."""
        return 1 if len(self.bet_sequences[0]) >= 2 and self.bet_sequences[0][-1] == PlayerActions.CHECK_CALL else 0
    def can_take_action(self, action) -> bool:
        """True when `action` is legal at this node; check/call is always legal."""
        if action == PlayerActions.CHECK_CALL:
            return True
        elif action == PlayerActions.FOLD:
            return self.can_fold
        elif action == PlayerActions.BET_RAISE:
            return self.can_raise
        raise RuntimeError("Bad action")
    @property
    def can_raise(self) -> bool:
        # At most two raises are allowed within a single betting round.
        relevant_bet_sequences = self._relevant_bet_sequence()
        if len(relevant_bet_sequences) <= 1:
            return True
        else:
            return relevant_bet_sequences.count(PlayerActions.BET_RAISE) < 2
    def fixup_action(self, action: PlayerActions):
        """Coerce an illegal fold/raise into the always-legal check/call."""
        if action == PlayerActions.FOLD and not self.can_fold:
            return PlayerActions.CHECK_CALL
        elif action == PlayerActions.BET_RAISE and not self.can_raise:
            return PlayerActions.CHECK_CALL
        else:
            return action
    @property
    def can_fold(self) -> bool:
        # Folding is only legal when facing an outstanding bet/raise.
        relevant_bet_sequence = self._relevant_bet_sequence()
        if len(relevant_bet_sequence) == 0:
            return False
        else:
            return relevant_bet_sequence[-1] == PlayerActions.BET_RAISE
    def _relevant_bet_sequence(self) -> Tuple[PlayerActions]:
        """The action tuple for the betting round currently in progress."""
        if self.game_round == 0:
            relevant_bet_sequence = self.bet_sequences[0]
        else:
            relevant_bet_sequence = self.bet_sequences[1]
        return relevant_bet_sequence
    @property
    def bet_sequences(self) -> List[Tuple[PlayerActions]]:
        # Read-only view of the underlying per-round action tuples.
        return self._bet_sequences
    @property
    def is_terminal(self) -> bool:
        """True when the hand is over: a round-0 fold, or round-1 betting settled."""
        if len(self._bet_sequences[0]) > 0 and self._bet_sequences[0][-1] == PlayerActions.FOLD:
            return True
        if len(self._bet_sequences[1]) <= 1:
            return False
        if self._bet_sequences[1][-1] != PlayerActions.BET_RAISE:
            return True
        return False
    @property
    def player_to_act(self) -> int:
        """Index (0/1) of the player to act, or -1 when the board must be dealt."""
        if self.game_round == 1 and self.board_card is None:
            return -1 # Chance
        relevant_bet_sequence = self._relevant_bet_sequence()
        return len(relevant_bet_sequence) % 2
    # Returns cost of taking action
    def add_action(self, action: PlayerActions) -> (int, PlayerActions):
        """Apply `action` (after fixup) and return (chips paid, action applied)."""
        action = self.fixup_action(action)
        game_round = self.game_round
        retval = 0
        if game_round == 0:
            # Lua code doesn't charge for antes
            # if len(self.bet_sequences[0]) < 2:
            #     retval = 1 # Antes
            if len(self.bet_sequences[0]) > 0 and self.bet_sequences[0][-1] == PlayerActions.BET_RAISE:
                retval += 2 # 2 to call
            if action == PlayerActions.BET_RAISE:
                retval += 2
            self.bet_sequences[0] = self.bet_sequences[0] + (action,)
        else:
            if len(self.bet_sequences[1]) > 0 and self.bet_sequences[1][-1] == PlayerActions.BET_RAISE:
                retval = 4 # 4 to call
            if action == PlayerActions.BET_RAISE:
                retval += 4
            self.bet_sequences[1] = self.bet_sequences[1] + (action,)
        # Sanity check: the board card must exist exactly when round-1 betting
        # is underway (player_to_act == -1 marks the pending chance node).
        if self.game_round == 1 and self.player_to_act != -1:
            assert self.board_card is not None
        else:
            assert self.board_card is None
        # one fixup: if they folded
        # if action == PlayerActions.FOLD:
        #     if game_round == 0 and len(self.bet_sequences[0]) <= 2:
        #         retval = 1 # Ante
        #     else:
        #         retval = 0
        # Lua code doesn't charge for antes
        if action == PlayerActions.FOLD:
            retval = 0
        # return the action cost and the fixed-up action
        return retval, action
    def _get_half_pot(self) -> float:
        """Matched per-player contribution to the pot: ante of 1 plus called bets."""
        half_pot = 1 # Antes
        to_call = 0
        for action in self._bet_sequences[0]:
            if action == PlayerActions.FOLD:
                return half_pot
            elif action == PlayerActions.CHECK_CALL:
                half_pot += to_call
                to_call = 0
            elif action == PlayerActions.BET_RAISE:
                half_pot += to_call
                to_call = 2
        to_call = 0
        for action in self._bet_sequences[1]:
            if action == PlayerActions.FOLD:
                return half_pot
            elif action == PlayerActions.CHECK_CALL:
                half_pot += to_call
                to_call = 0
            elif action == PlayerActions.BET_RAISE:
                half_pot += to_call
                to_call = 4
        return float(half_pot)
    def _get_winner(self, player_cards: List[int]) -> Optional[int]:
        """Winning player index, or None for a split pot at showdown.

        A fold awards the hand to the other player; otherwise card % 3 is the
        rank, pairing the board wins, else the higher rank wins.
        """
        try:
            fold_idx = self._bet_sequences[0].index(PlayerActions.FOLD)
            unfolded_player = (fold_idx + 1) % 2
            return unfolded_player
        except ValueError:
            pass
        try:
            fold_idx = self._bet_sequences[1].index(PlayerActions.FOLD)
            unfolded_player = (fold_idx + 1) % 2
            return unfolded_player
        except ValueError:
            pass
        # Showdown
        assert self.board_card is not None
        player_normalized_cards = [player_cards[0] % 3, player_cards[1] % 3]
        board_normalized_card = self.board_card % 3
        if player_normalized_cards[0] == player_normalized_cards[1]:
            return None
        elif player_normalized_cards[0] == board_normalized_card:
            return 0
        elif player_normalized_cards[1] == board_normalized_card:
            return 1
        else:
            return 0 if player_normalized_cards[0] > player_normalized_cards[1] else 1
    def get_payoffs(self, player_cards: List[int]) -> np.ndarray:
        """Terminal payoff vector [p0, p1]: chips received by each player."""
        if not self.is_terminal:
            raise RuntimeError("Can't get payoffs for non-terminal")
        half_pot = self._get_half_pot()
        winner = self._get_winner(player_cards)
        if winner is None:
            return np.array([half_pot, half_pot])
        if winner == 0:
            return np.array([half_pot * 2.0, 0.0])
        elif winner == 1:
            return np.array([0.0, half_pot * 2.0])
class LeducInfoset(LeducNode):
    """A single player's information set: private card plus the public state."""
    def __init__(
            self,
            card: int,
            bet_sequences: List[Tuple],
            board_card: Optional[int]):
        super().__init__(bet_sequences=bet_sequences, board_card=board_card)
        self.card = card
    def __str__(self):
        """Render as e.g. "JQ:/cr/c:" — hole (and board) rank chars, then per-round action chars."""
        card_to_char = {
            0: "J",
            1: "Q",
            2: "K"
        }
        retval = card_to_char[self.card % 3]
        if self.board_card is not None:
            retval += card_to_char[self.board_card % 3]
        retval += ":/"
        retval += "".join(PlayerActions.ACTION_TO_CHAR[a] for a in self.bet_sequences[0])
        if self.game_round == 1:
            retval += "/"
            retval += "".join(PlayerActions.ACTION_TO_CHAR[a] for a in self.bet_sequences[1])
        retval += ":"
        return retval
    def __eq__(self, other):
        """Value equality on (card, bet_sequences, board_card).

        Returns NotImplemented for foreign operand types so Python falls back
        to its default comparison (yielding False); the original raised
        AttributeError when compared against any non-None foreign object.
        """
        if not isinstance(other, LeducInfoset):
            return NotImplemented
        return (self.card == other.card and self._bet_sequences == other.bet_sequences
                and self.board_card == other.board_card)
class LeducGameState(LeducNode):
    """Full game state: public betting history plus both players' private cards.

    Maintains one LeducInfoset per player (rebuilt whenever the public state
    changes) and deals the board card automatically at the chance node.
    """
    def __init__(
            self,
            player_cards: List[int],
            bet_sequences: List[Tuple],
            board_card: Optional[int]):
        self.player_cards = player_cards
        super().__init__(bet_sequences=bet_sequences, board_card=board_card)
        self.infosets = None
        self._update_infosets()
    def _update_infosets(self):
        # Each infoset receives its own deep copy of the betting history so
        # later mutations of this state cannot leak into existing infosets.
        self.infosets = tuple(
            LeducInfoset(card=card, bet_sequences=copy.deepcopy(self._bet_sequences), board_card=self.board_card) for card in
            self.player_cards)
    def deal_board_card(self):
        """Deal the public card uniformly from the cards not held by the players."""
        assert self.board_card is None and self.player_to_act == -1
        deck = list(LeducPokerGame.DECK)
        deck.remove(self.player_cards[0])
        deck.remove(self.player_cards[1])
        self.board_card = random.choice(deck)
        self._update_infosets()
    def get_payoffs(self):
        # Delegate to the base implementation using our stored private cards.
        return LeducNode.get_payoffs(self, self.player_cards)
    def add_action(self, action: PlayerActions):
        """Apply an action; deal the board automatically when the chance node is reached."""
        retval = super().add_action(action)
        if self.player_to_act == -1:
            self.deal_board_card()
        else:
            self._update_infosets()
        return retval
class LeducPokerGame(object):
    """Top-level Leduc hold'em game: deals hole cards and owns the root state."""
    NUM_CARDS = 6
    # Six cards, two suits of J/Q/K; card % 3 gives the rank.
    DECK = tuple(range(6))
    def __init__(self, player_cards: Optional[List[int]] = None):
        """Start a hand.

        :param player_cards: the two hole cards; dealt uniformly at random when
            omitted.  Bug fix: the original ignored an explicitly supplied
            ``player_cards`` and raised UnboundLocalError for it.
        """
        if player_cards is None:
            player_cards = random.sample(self.DECK, 2)
        self.player_cards = player_cards
        self.game_state = LeducGameState(self.player_cards, [(), ()], board_card=None)
| [
"thomas.j.johnson@gmail.com"
] | thomas.j.johnson@gmail.com |
338c46e909035b0796f32985fc0a5b1bd0f7175c | 2438cb198fc03a1de169e87a711692045df03ccd | /coffee.py | 223762c681b226d47103c27f06908dabfaa7fa38 | [] | no_license | Om1627/corrcoef | 7138db6aa6f425af21f53ad4b01b8b353a33f9d9 | ab2d088344ec078be851fe51df259174d64876ca | refs/heads/main | 2023-02-21T22:18:55.323855 | 2021-01-28T09:05:09 | 2021-01-28T09:05:09 | 333,700,185 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 849 | py | import pandas as pd
import plotly.express as px
import numpy as np
import csv
def getDataSource(data_path):
    """Read the coffee/sleep CSV and return plottable columns.

    :param data_path: path to a CSV with "Coffee in ml" and "sleep in hours" columns.
    :return: {"x": coffee amounts (ml), "y": sleep durations (hours)} as floats.
    """
    coffee_ml = []
    sleep_hours = []
    with open(data_path) as csv_file:
        csv_reader = csv.DictReader(csv_file)
        for row in csv_reader:
            # Bug fix: the original appended the coffee column to the sleep
            # list and the sleep column to the coffee list (swapped).
            coffee_ml.append(float(row["Coffee in ml"]))
            sleep_hours.append(float(row["sleep in hours"]))
    return {"x": coffee_ml, "y": sleep_hours}
def findCorrelation(datasource):
    """Print and return the Pearson correlation between x and y.

    :param datasource: dict with numeric sequences under "x" and "y".
    :return: the off-diagonal correlation coefficient (also returned so
        callers can use the value instead of only seeing the print).
    """
    correlation = np.corrcoef(datasource["x"], datasource["y"])
    print("correlation=", correlation[0, 1])
    return correlation[0, 1]
def plot():
    """Scatter-plot coffee intake against sleep, read straight from the CSV."""
    frame = pd.read_csv("coffee.csv")
    figure = px.scatter(frame, x="Coffee in ml", y="sleep in hours")
    figure.show()
def setup():
    """Load the CSV once and report the coffee/sleep correlation."""
    source = getDataSource("./coffee.csv")
    findCorrelation(source)
setup()
plot()
| [
"noreply@github.com"
] | noreply@github.com |
4df437bfa66912a489fbf5bbae79735164228493 | e10f72609ffd7c156303640aad26512f91e5b341 | /decode-string.py | fb86f5dd4eb14168aca4a8059adac6ac9b10ea95 | [] | no_license | CSLSDS/leetcode | c26e514b8ee47a59d2e71e9a4f5cda3e66538d41 | 6d00429d989c9ba05e2e4a682b3e2d82a8920aca | refs/heads/master | 2022-12-06T14:51:06.192262 | 2020-08-28T17:50:50 | 2020-08-28T17:50:50 | 291,098,618 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,087 | py | # https://leetcode.com/problems/decode-string/
from collections import deque
class Solution(object):
    def decodeString(self, s):
        """
        Expand an encoded string in which k[sub] repeats sub k times.
        :type s: str
        :rtype: str
        """
        frames = []      # saved (prefix, multiplier) pairs, one per open bracket
        multiplier = 0   # digits accumulated for the next bracket
        decoded = ''     # string built at the current nesting depth
        for ch in s:
            if ch == '[':
                # Defer the current prefix and repeat count until this bracket closes.
                frames.append((decoded, multiplier))
                decoded, multiplier = '', 0
            elif ch == ']':
                prefix, count = frames.pop()
                decoded = prefix + count * decoded
                multiplier = 0
            elif ch.isdigit():
                # Repeat counts may span several digits.
                multiplier = multiplier * 10 + int(ch)
            else:
                decoded += ch
        return decoded
"lambdadatasciencelearning@gmail.com"
] | lambdadatasciencelearning@gmail.com |
39c078ee69d1098e1c91f37879882232c475e2f0 | 59b0ebc4249f20edd0e87dc63784c6e8c138c7fd | /.history/fibonacci_20180603232558.py | 0f355ae930f9f8d834a1e6a158738d3573e77163 | [] | no_license | Los4U/first_python_programs | f397da10be3ef525995f3f220e3b60012a6accaa | c3fc33a38c84abd292cb2e86de63e09434fc7fc4 | refs/heads/master | 2020-03-22T08:09:40.426118 | 2018-07-04T17:17:58 | 2018-07-04T17:17:58 | 139,748,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 275 | py | i = 0
# Interactive Fibonacci printer: shows the first `user_input` terms of the
# sequence 0, 1, 1, 2, 3, ... alongside their indices.  (Cleaned up: the
# original carried unused variables and commented-out dead code.)
user_input = int(input("How many numbers print out? : "))
current, following = 0, 1
for fn in range(user_input):
    # Two columns: 2-wide index, value right-aligned in 10 characters.
    print('{0:2d} {1:>10}'.format(fn, current))
    current, following = following, current + following
| [
"inz.kamil.wos@gmail.com"
] | inz.kamil.wos@gmail.com |
e2043a4f3f9bfbda10f9e715da267f997ce686b1 | 84579f545dd8b62e2b3a4be8180078641a3000a7 | /manage.py | e6a3a38ffeeae91d089af92fd263bf91c11406f3 | [] | no_license | Prosper033/happyday | f46ac3a514337c73dbc5d41442ac09bef628025c | 4c0b9c07f141216f7199b2b58483d67b37fbdb18 | refs/heads/main | 2023-08-29T15:09:17.370543 | 2021-09-15T13:15:27 | 2021-09-15T13:15:27 | 406,769,225 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 664 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    # Point Django at this project's settings unless the caller already set it.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'happyday.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a setup hint; `from exc` preserves the original cause.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| [
"idowuoluwaseun033@gmail.com"
] | idowuoluwaseun033@gmail.com |
da925a687a4a6763196e300aa014cbd8f853cc0e | 6cb5afbe80fe07f837848f56c1c0cc194534155c | /전기버스2.py | 0a419d679149cd2b9f819aedda22935b4e3d956d | [] | no_license | KoMinjae/codingtest | 9073dbd094e0675c0f3cac35085703d8d1c546f6 | 160dfc5f73cad9d1d00a9a497550ab34cdf31a32 | refs/heads/master | 2022-12-19T14:11:00.972578 | 2020-09-25T03:08:16 | 2020-09-25T03:08:16 | 285,505,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 725 | py | def solution(N,bp):
    """BFS over bus stops 1..N; appears to return the minimum number of
    recharge stops needed to reach stop N, where bp[i] is the battery range
    granted at stop i+1 (the battery value is replaced at each stop).
    """
    answer = list()
    battery = bp[0]
    stack=list()
    stack.append((1,battery,0))
    mintemp=999999
    while stack:
        position, nowbattery, time = stack.pop(0)
        # Backtracking/pruning condition: skip paths already worse than the best found.
        if time <= mintemp:
            if position == N:
                answer.append(time)
                if mintemp>time:
                    mintemp=time
            else:
                for i in range(1,nowbattery+1):
                    if position+i>=N:
                        # Reaching the terminal stop needs no extra recharge.
                        stack.append((N,0,time))
                    else:
                        # Stopping short means recharging there: cost +1.
                        stack.append((position+i,bp[position+i-1],time+1))
    return min(answer)
print(solution(10, [2, 1, 3, 2, 2, 5, 4, 2, 1])) | [
"alswovv@naver.com"
] | alswovv@naver.com |
9e11e55d93f9595fd71b03c927ce27548ea7f34c | 28afc0b162388258f58382068f4496cb417879aa | /utils/request.py | 85bfe51573a14fe97dc1f3ad842c09cfe01f9d86 | [] | no_license | a1wen/bmtest_sample | 395c94191fd53687df55b70585c5570045da14cb | 9cda82198dbecfb56c401f16b4ed7a08d9a1b83d | refs/heads/master | 2022-12-15T07:19:30.171868 | 2019-07-12T15:43:58 | 2019-07-12T15:43:58 | 196,605,164 | 0 | 0 | null | 2022-12-08T05:52:34 | 2019-07-12T15:42:38 | Python | UTF-8 | Python | false | false | 795 | py | from typing import Optional
from pydantic import BaseModel
class Request(BaseModel):
    """Common person fields shared by every check-request payload."""
    msg_id: str
    first_name: str
    surname: str
    patronymic: Optional[str]
    birth_date: str
class ByMsisdnRequest(Request):
    """Check request keyed by a mobile number (MSISDN)."""
    msisdn: str
class ByPassportRequest(Request):
    """Check request keyed by an identity document."""
    series: Optional[str]
    number: str
    document_type: int
    issue_date: Optional[str]
    issue_authority: Optional[str]
class SimpleCheckRequest(Request):
    """Check request carrying both an optional MSISDN and document fields."""
    msisdn: Optional[str]
    series: Optional[str]
    number: str
    document_type: int
    issue_date: Optional[str]
    issue_authority: Optional[str]
class FullCheckRequest(SimpleCheckRequest):
    """Simple check extended with callback and SMEV delivery options."""
    callback_url: Optional[str]
    smev_ttl: Optional[int]
    smev_ignore_cache: Optional[bool] = False
    smev_priority: Optional[str] = 'medium'
| [
"artem.chuprina@inplatlabs.ru"
] | artem.chuprina@inplatlabs.ru |
85c2a8dc30a8c4d16a1497f4bad44935f7ca19d2 | 81485dc96f7539730bee976c7e8e3d5929c3df77 | /ProjectEuler145.py | a3fd109802113ff296456c4d9fc9e471357fb859 | [] | no_license | zfhrp6/PE | 6462621f5cb3812c7d8d9f591ad66382490661e2 | 4e64f6549bd50fb4c1ee5f580a76764935e35360 | refs/heads/master | 2020-04-05T13:45:56.936758 | 2015-12-11T08:29:50 | 2015-12-11T08:29:50 | 4,495,623 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 940 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
project euler problem 145
ある正の整数nについて、[n + reverse(n)]が奇数のみで表されるようなnが存在する。
えば、36 + 63 = 99, 409 + 904 = 1313 のように。この性質を持つ数を、reversibleと呼ぶことにする。
つまり、36, 63, 409, 904はrevesibleである。
先頭の0はnでもreverse(n)でも許されない。
1000未満には120個のreversibleな数が存在する。
10億(10^9)未満では、いくつのreversibleな数が存在するか。
"""
import time
t0 = time.time()
# A number n is "reversible" when every digit of n + reverse(n) is odd.
# Candidates ending in 0 are excluded, since reverse(n) would then have a
# leading zero.  Brute force over 1..10**9 inclusive, as the original did.
EVEN_DIGITS = set("02468")
answer = 0
for i in range(1, 10 ** 9 + 1):
    if i % 10 == 0:
        continue
    if i % 1000000 == 1:
        print(i)  # progress indicator, once per million candidates
    num = i + int(str(i)[::-1])
    # Set intersection replaces the original chain of five substring tests.
    if not EVEN_DIGITS & set(str(num)):
        answer += 1
print(answer)
print(time.time() - t0, "seconds")
| [
"coricozizi@gmail.com"
] | coricozizi@gmail.com |
6bd5fb8e2cc28159a3d0726aa5efc0e21295b713 | a46d135ba8fd7bd40f0b7d7a96c72be446025719 | /packages/python/plotly/plotly/validators/contour/_textsrc.py | 43bd0d62ed17e92c16a553b953658aaf6d67f0be | [
"MIT"
] | permissive | hugovk/plotly.py | 5e763fe96f225d964c4fcd1dea79dbefa50b4692 | cfad7862594b35965c0e000813bd7805e8494a5b | refs/heads/master | 2022-05-10T12:17:38.797994 | 2021-12-21T03:49:19 | 2021-12-21T03:49:19 | 234,146,634 | 0 | 0 | MIT | 2020-01-15T18:33:43 | 2020-01-15T18:33:41 | null | UTF-8 | Python | false | false | 393 | py | import _plotly_utils.basevalidators
class TextsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    # Auto-generated by Plotly's codegen: validates the `textsrc` attribute
    # (a column-source reference) on `contour` traces.
    def __init__(self, plotly_name="textsrc", parent_name="contour", **kwargs):
        super(TextsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Default edit type is "none" unless the caller overrides it.
            edit_type=kwargs.pop("edit_type", "none"),
            **kwargs
        )
| [
"noreply@github.com"
] | noreply@github.com |
e4c5682a8d2d2bdc622eb0592def31a8a389c27f | 7d23cbeaf8fae2dca34180d8fa084b14c28cd1fc | /src/test/common/page.py | 1ea314b59b6f07a21660fd49a4ddbc71dc2a616e | [] | no_license | Cnnnnnnn/test-framework | e29dfb34474f8e8fe52a2007334cc1668d046077 | 73f6e8fb88b8dc38bb53d3a88b7ae2faa9aa3521 | refs/heads/master | 2020-04-03T09:52:25.326538 | 2018-10-30T05:27:40 | 2018-10-30T05:27:40 | 155,178,640 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,477 | py | import selenium.common.exceptions
from selenium.webdriver.support import expected_conditions
from src.test.common.browser import Browser
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.action_chains import ActionChains
import time
class Page(Browser):
    """Base page object wrapping common Selenium driver operations."""
    def __init__(self, page=None, browser_type='chrome'):
        # Reuse the driver of an existing page when one is given; otherwise
        # let the Browser base class create a fresh driver of the given type.
        self.accept_next_alert = True
        if page:
            self.driver = page.driver
        else:
            super(Page, self).__init__(browser_type=browser_type)
    def get_driver(self):
        return self.driver
    def refresh(self):
        """Reload the current page and give it a second to settle."""
        self.driver.refresh()
        time.sleep(1)
    def find_element(self, *args):
        """Wait up to 10s for the element located by `args` to be displayed, then return it."""
        try:
            # NOTE: here the locator arrives as separate tuple elements, so it must be unpacked with *.
            WebDriverWait(self.driver, 10).until(lambda driver: driver.find_element(*args).is_displayed())
            # NOTE: this alternative takes the locator tuple itself, so no * is needed:
            #WebDriverWait(self.driver, 10).until(EC.visibility_of_element_located(loc))
            return self.driver.find_element(*args)
        except:
            # NOTE(review): bare except swallows every error and implicitly returns None.
            print(u"%s 页面中未能找到 %s 元素" % (self, args))
    def move_to_element(self, *args):
        """Hover the mouse over the element located by `args`."""
        ele = self.find_element(*args)
        ActionChains(self.driver).move_to_element(ele).perform()
        time.sleep(1)
    def switch_frame(self, name):
        return self.driver.switch_to_frame(name)
    def switch_default_content(self):
        return self.driver.switch_to_default_content()
    def switch_windows(self, new=True, old=False):
        """Switch to the newest (handle index 1) and/or original (index 0) window."""
        windows = self.driver.window_handles
        if new:
            self.driver.switch_to_window(windows[1])
        if old:
            self.driver.switch_to_window(windows[0])
    def get_time(self, bottom=True):
        """Drive an Element-UI date picker to select December 30, 2017.

        `bottom` chooses the popup anchored below (True) or above the input.
        """
        if bottom:
            place = '//div[@x-placement="bottom-start"]'
        else:
            place = '//div[@x-placement="top-start"]'
        # Set the date to 2017-12-30 via the year -> month -> day panels.
        time.sleep(1)
        self.driver.find_element_by_xpath(place+'/div[@class="el-picker-panel__body-wrapper"]/div/div[@class="el-date-picker__header"]/span[1]').click()
        self.driver.find_element_by_xpath(place+'/div[@class="el-picker-panel__body-wrapper"]/div/div[2]/table[@class="el-year-table"]/tbody/tr[3]/td[1]').click()
        self.driver.find_element_by_xpath(place+'/div[@class="el-picker-panel__body-wrapper"]/div/div[2]/table[@class="el-month-table"]/tbody/tr[3]/td[4]').click()
        self.driver.find_element_by_xpath(place+'/div[@class="el-picker-panel__body-wrapper"]/div/div[2]/table[@class="el-date-table"]/tbody/tr[5]/td[7]').click()
        time.sleep(1)
    def is_element_exist(self, *args):
        """True when the element becomes present within 10 seconds."""
        try:
            WebDriverWait(self.driver, 10).until(expected_conditions.presence_of_element_located(*args))
            return True
        except:
            # NOTE(review): bare except hides unexpected driver errors, not just timeouts.
            return False
    def is_alert_present(self):
        """True when a JavaScript alert is currently open."""
        try:
            self.driver.switch_to_alert()
        except selenium.common.exceptions.NoAlertPresentException as e:
            return False
        return True
    def close_alert_and_get_its_text(self):
        """Accept or dismiss the open alert (per accept_next_alert) and return its text."""
        try:
            alert = self.driver.switch_to_alert()
            alert_text = alert.text
            if self.accept_next_alert:
                alert.accept()
            else:
                alert.dismiss()
            return alert_text
        finally:
            pass
| [
"cnn@thinkerx.com"
] | cnn@thinkerx.com |
6d76d5cccf34b4397840d9054661e6b099cfab82 | 12bfb5bcf0c1fcf4ae0a09b39bb180d6d2f27236 | /manage.py | 11a8c800f2676621c04324f836e1331b9ca4dcc7 | [] | no_license | NonsoEz/Food_Vendor_App | b5e52371069595b129a4be4a065ae0f828225b86 | 550b785fe182a73f9ec85f033f9f1ae0b29f3325 | refs/heads/master | 2022-11-10T03:08:30.906210 | 2020-06-14T11:36:46 | 2020-06-14T11:36:46 | 272,182,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    # Point Django at this project's settings unless the caller already set it.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'daisy_delights.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a setup hint; `from exc` preserves the original cause.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| [
"itsnonso97@yahoo.com"
] | itsnonso97@yahoo.com |
d190d2073bc71b201cd5a4cdf229f13d5ed9a0b0 | 43b5d7bdcc9ec7a7b38e25ce67d0a3a618362275 | /netdata/workers/worker_storage.py | c8bb6340fb8f56a87dc7c9535ace93fd81a71298 | [
"Apache-2.0"
] | permissive | mincode/netdata | c8b64d1ab99601823fba2d3c9d70b4960cd26ac9 | 4369a3bfb473509eff92083e03f214d5b75f6074 | refs/heads/master | 2021-03-22T04:33:04.076065 | 2018-07-27T19:54:48 | 2018-07-27T19:54:48 | 113,805,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,139 | py | # Storage for a table of worker instances
from netdata.workers.json_storage import JSONStorage
class WorkerStorage(JSONStorage):
    """
    Table of worker instances stored in a json file;
    consisting of a list of pairs {'ip': ..., 'instance_id': ...}
    """
    _instances_label = 'instances' # key under which the list of {'ip': ..., 'instance_id': ...} is stored
    def __init__(self, path, name):
        """
        Initialize.
        :param path: path to the storage file; empty means the current directory.
        :param name: file name, json file.
        """
        super(WorkerStorage, self).__init__(path, name)
        if self._instances_label not in self.data:
            # First use of this storage file: start with an empty table.
            self.set(self._instances_label, [])
    @property
    def instances(self):
        """
        List of instances.
        :return list of {'ip':..., 'instance_id':....}
        """
        return self.get(self._instances_label)
    @property
    def all_ids(self):
        """
        List all instance ids.
        :return list of all instance ids.
        """
        return [entry['instance_id'] for entry in self.instances]
    @property
    def all_ips(self):
        """
        List all instance ips.
        :return list of all instance ips.
        """
        return [entry['ip'] for entry in self.instances]
    def insert(self, index, ip, instance_id):
        """
        Insert new instance at given index and persist the change.
        :param index: index to insert at (index == len appends).
        :param ip: ip address of new instance.
        :param instance_id: id of new instance.
        """
        # list.insert already appends when index == len(list), so the former
        # special case branch for appending was redundant.
        self.instances.insert(index, {'ip': ip, 'instance_id': instance_id})
        self.dump()
    def delete(self, index):
        """
        Delete entry and persist the change.
        :param index: index of entry to be deleted.
        """
        del self.instances[index]
        self.dump()
    def delete_all(self):
        """
        Delete all entries.
        """
        # NOTE(review): unlike insert()/delete() this does not call
        # self.dump(); confirm whether set() persists on its own.
        self.set(self._instances_label, [])
| [
"manfred@minimair.org"
] | manfred@minimair.org |
f7877e8532fbb6c049c1839b2d061e9e511a9704 | 380b3102e143a11ecd89f5f2eeced8532b8c431e | /Week_02/homework2.py | 9fa4686c8f386443b55201bf48c2599d7541152d | [] | no_license | yoyoshuang/algorithm012 | 9fe1672ac5cb8d4abe515b7e02cf8b436c366b56 | 505caabc895999bb12e9ff7b42013b4af29655e4 | refs/heads/master | 2022-12-14T13:17:51.115992 | 2020-08-30T08:39:31 | 2020-08-30T08:39:31 | 280,764,795 | 0 | 1 | null | 2020-07-19T00:41:25 | 2020-07-19T00:41:24 | null | UTF-8 | Python | false | false | 954 | py | # N叉树的层次遍历
"""
# Definition for a Node.
class Node:
def __init__(self, val=None, children=None):
self.val = val
self.children = children
"""
class Solution:
    def levelOrder(self, root: 'Node') -> List[List[int]]:
        """Breadth-first level-order traversal of an N-ary tree.

        Returns one list of node values per depth level, top to bottom;
        an empty tree yields [].  (Cleanup: identity checks instead of
        `== None` / `!= None`, and the unused bookkeeping lists removed.)
        """
        if root is None:
            return []
        levels = []
        current_level = [root]
        while current_level:
            # Record this level's values, then gather every non-None child
            # to form the next level.
            levels.append([node.val for node in current_level])
            current_level = [child
                             for node in current_level
                             for child in node.children
                             if child is not None]
        return levels
"haoshuang@rd.netease.com"
] | haoshuang@rd.netease.com |
c8ceb205029b263e2dcc73b72e5789d4fb2fb9fc | bc7f4a51dc8e98a6efb390949316c5d6ea9b18a2 | /devel/lib/python2.7/dist-packages/mavros_msgs/msg/_GPSRAW.py | cae8c0300eddf1c7613fa784634add8ab00ec2ff | [] | no_license | JethroPhuah/Catkin_ws | 952c22bbb8c72aecf7cdfe71a89f69b6bb0d7404 | cdbf9f25dc5f2ce83cf1bc32905bbe29c60786cd | refs/heads/master | 2023-06-30T23:28:02.816129 | 2021-08-05T10:25:21 | 2021-08-05T10:25:21 | 388,732,671 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 104 | py | /home/jethro/catkin_ws/devel/.private/mavros_msgs/lib/python2.7/dist-packages/mavros_msgs/msg/_GPSRAW.py | [
"jethrophuah@gmail.com"
] | jethrophuah@gmail.com |
1daeac4ecb17e7a2f26115ace37be515b4198865 | 897cb969990a5ae319547fd572a262d58a1e33a8 | /scripts/get_lumi.py | cf7754d90251baeaf51cecd347b356800a3c9aac | [] | no_license | KIT-CMS/Excalibur | cc5a028bf6ad29a636536c3dfc0ebdc0eacfbbb7 | 8c27e2fdd7b7d5a0439f6e63be2299b16f5291c0 | refs/heads/master | 2023-07-24T05:28:08.156998 | 2023-07-17T15:29:15 | 2023-07-17T15:29:15 | 29,307,758 | 1 | 5 | null | 2023-05-24T11:41:22 | 2015-01-15T16:59:28 | Python | UTF-8 | Python | false | false | 4,646 | py | #!/usr/bin/python
# standard library imports
import os
import sys
import json
import subprocess
import argparse
import errno
# third party imports
# application/library imports
CLI = argparse.ArgumentParser(
description="Get/Calculate integrated luminosity for given runs",
epilog="This tool uses the brilcalc suite to extract luminosity information,"
"\nautomating the queries and environment setup."
"\n"
"\nThe brilcalc documentation can be found at"
"\nhttp://cms-service-lumi.web.cern.ch/cms-service-lumi/brilwsdoc.html",
formatter_class=argparse.RawDescriptionHelpFormatter,
)
CLI_runs = CLI.add_argument_group("run definition")
CLI_runs.add_argument(
"runs",
help="runs in CMS JSON file format; either a file path (quoted runs) or raw string (unquoted runs)"
)
CLI_bril = CLI.add_argument_group("brilsw/brilcalc settings")
CLI_bril.add_argument(
"--brilconda-path",
default="/afs/cern.ch/cms/lumi/brilconda-1.0.3",
help="path to the brilconda suite (contains bin and lib directories)"
)
CLI_bril.add_argument(
"--brilws-path",
default="~/.local",
help="pip virtual env of brilws"
)
CLI_bril.add_argument(
"--lumi-unit",
default="/pb",
help="unit of lumi output, e.g. /fb, /pb or 1e39/cm2"
)
CLI_bril.add_argument(
"--normtag",
default=None,
help="lumi calibration/correction function or json"
)
def get_bril_env(brilconda_path, brilws_path):
"""
Create the env for running bril commands
:param brilconda_path: path of the brilconda suite (contains bin and lib directories)
:type brilconda_path: str
:param brilws_path: pip virtual env of brilws
:type brilws_path: str
:returns: env for processes using brilws to run in
:rtype: dict
"""
print >> sys.stderr, "Preparing bril environment"
# construct dedicated env for bril commands
bril_env = os.environ.copy()
bril_env["PATH"] = ":".join((
os.path.join(os.path.expanduser(brilws_path), "bin"),
os.path.join(os.path.expanduser(brilconda_path), "bin"),
bril_env["PATH"],
))
# make sure brilws is available
get_proc_output(
['pip', 'install', '--install-option=--prefix=$HOME/.local', 'brilws'],
env=bril_env,
)
return bril_env
def get_lumi(run_str, bril_env, unit="/pb", normtag=None):
"""
Get the lumi for a specific run string from brilcalc
"""
print >> sys.stderr, "Querying brilcalc"
# use CSV output for easier parsing
bril_out, bril_err = get_proc_output(
[
"brilcalc",
"lumi", "-i", run_str,
"--output-style", "csv",
"-u", unit,
] + [
"--normtag", normtag
] if normtag is not None else [],
env=bril_env,
)
bril_iter, header, values = iter(bril_out.splitlines()), None, None
while True:
line = bril_iter.next()
# we only care about the summary for the runs
if not line.startswith('#Summary:'):
continue
header = bril_iter.next()
values = bril_iter.next()
break
header = header.replace("(%s)" % unit, "")
header = header[1:].split(",")
values = [
float(value) if "." in value else int(value)
for value in values[1:].split(",")
]
return dict(zip(header, values))
def main():
opts = CLI.parse_args()
# all bril commands execute with brilws suite
bril_env = get_bril_env(
brilconda_path=opts.brilconda_path,
brilws_path=opts.brilws_path
)
lumi_dict = get_lumi(
run_str=opts.runs,
bril_env=bril_env,
unit=opts.lumi_unit,
normtag=opts.normtag,
)
print json.dumps(lumi_dict)
# -- Helpers -------------------------------------------------------------------
class CalledProcessError(Exception):
def __init__(self, returncode, cmd="<unknown>", output=None):
self.returncode, self.cmd, self.output = returncode, cmd, output
def __str__(self):
return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode)
def get_proc_output(*popenargs, **kwargs):
"""
Tweaked version of subprocess.check_output (which is not in py2.6 anyways)
:param popenargs: arguments to Popen
:param kwargs: keyword arguments to Popen
:returns: stdout and stderr of the process
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
try:
process = subprocess.Popen(stdout=subprocess.PIPE, stderr=subprocess.PIPE, *popenargs, **kwargs)
except OSError as oserr:
if oserr.errno == errno.ENOENT:
raise EnvironmentError(
"Executable for '%s' not found" % kwargs.get("args", popenargs[0])
)
else:
raise
stdout, stderr = process.communicate()
if process.poll(): # check retcode != 0
print stdout, stderr
raise CalledProcessError(
returncode=process.poll(),
cmd=kwargs.get("args", popenargs[0]),
output=stdout,
)
return stdout, stderr
if __name__ == "__main__":
main()
| [
"max.fischer@kit.edu"
] | max.fischer@kit.edu |
73a74888c6116f3919e354c714885223f569f7b4 | 1c6299a4b63264e4722c1230ddc6fbd1e8795040 | /Tareas/Proyecto/Proyecto.py | 5cad493665fece58c650333c82b1d85fef9bfc7b | [] | no_license | RMACR7LP/Programacion1 | a7f930c4c5881af42f3e24c96fb77d38a0b19cee | f207da58657309f1de999e11d8d25f6722f2a667 | refs/heads/master | 2021-01-02T23:01:13.209506 | 2017-11-12T17:56:47 | 2017-11-12T17:56:47 | 99,440,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,914 | py | #-*- coding:utf-8 -*-
from Tkinter import *
import ttk
import datetime
import json
import subprocess
import sys
import os
import socket
import reportlab
from PIL import Image, ImageTk
from reportlab.pdfgen import canvas
from reportlab.lib.colors import PCMYKColor
from reportlab.graphics.shapes import Drawing
from reportlab.graphics.charts.barcharts import VerticalBarChart
try:
import cPickle as pickle
except ImportError:
import pickle
#-------------------------------------------Funciones y Clases-------------------------------------------------------
cities=[]
datac=[]
estaciones_visitadas=[]
aviso=[]
def signupclick():
nombre_dado=textentry1.get()
password_dado=textentry2.get()
with open('usuarios.json','r') as lista:
d=json.load(lista)
x=len(d["usuarios"])
i=0
while i<x:
if d["usuarios"][i]["nombre"]==nombre_dado:
Label(ventana,text="Un usuario con ese nombre ya está registado").grid(row=5,column=0,columnspan=4)
i=x+1
else:
i+=1
if i==x:
password_dado=textentry2.get()
d["usuarios"].append({
"nombre": nombre_dado,
"password": password_dado
})
with open('usuarios.json','w') as entrada:
json.dump(d, entrada)
Label(ventana,text="Felicidades! Te has registrado con éxito.").grid(row=5,column=0,columnspan=4)
def loginclick():
global textentry1
global cuenta_dado
global aviso
cuenta_dado=textentry1.get()
password_dado=textentry2.get ()
with open('usuarios.json','r') as lista:
d=json.load(lista)
x=len(d['usuarios'])
indices =[]
i=0
while i<x:
if d['usuarios'][i]['nombre']==cuenta_dado:
indices.append(i)
i+=1
else:
i+=1
if len(indices)==0:
Label(ventana,text="Ese nombre de usuario o correo no está registrado").grid(row=5,column=0)
else:
j=indices[0]
if password_dado==d['usuarios'][j]['password']:
aviso=['hola']
ventana.destroy()
else:
print "La contraseña no es correcta, presione enter"
class Usuario():
def __init__(self,nombre):
self.nombre = nombre
def asignar_ultimasciudades(self,cities):
self.cities=cities
def asignar_datos(self,datac):
self.datac=datac
def asignar_estaciones(self,estaciones_visitadas):
self.estaciones_visitadas=estaciones_visitadas
def guardar(nombre,cities,datac,estaciones_visitadas):
if len(estaciones_visitadas)!=0 and len(cities)!=0:
jugador = Usuario(nombre)
jugador.asignar_ultimasciudades(cities)
jugador.asignar_datos(datac)
jugador.asignar_estaciones(estaciones_visitadas)
archivo = open(str(nombre)+".txt", "w")
pickle.dump(jugador, archivo,1)
archivo.close()
elif len(estaciones_visitadas)==0:
estaciones_visitadas=['None']
jugador = Usuario(nombre)
jugador.asignar_ultimasciudades(cities)
jugador.asignar_datos(datac)
jugador.asignar_estaciones(estaciones_visitadas)
archivo = open(str(nombre)+".txt", "w")
pickle.dump(jugador, archivo,1)
archivo.close()
elif len(cities)==0:
cities=['None']
jugador = Usuario(nombre)
jugador.asignar_ultimasciudades(cities)
jugador.asignar_datos(datac)
jugador.asignar_estaciones(estaciones_visitadas)
archivo = open(str(nombre)+".txt", "w")
pickle.dump(jugador, archivo,1)
archivo.close()
def load():
fichero=open(str(cuenta_dado)+".txt","r+")
jugador=pickle.load(fichero)
lista_ciudades=jugador.cities
lista_datos=jugador.datac
lista_estaciones=jugador.estaciones_visitadas
print jugador.nombre
print lista_ciudades
print lista_datos
global cities
global datac
global estaciones_visitadas
cities=lista_ciudades
datac=lista_datos
estaciones_visitadas=lista_estaciones
def stateclick():
global tab1_estado
global tab1_state
global tab1_estacion
global stationsr
tab1_state=tab1_estado.get()
try:
if int(tab1_state)==1 or int(tab1_state)==2:
m="estaciones"+str(tab1_state)+".json"
with open(m,'r') as estaciones:
n=json.load(estaciones)
l=len(n['results'])
w=""
for i in range(0,l):
w= w+str(i+1)+") "+n['results'][i]['name']+"\n"
elif 2<int(tab1_state)<6:
m="estaciones"+str(int(tab1_state)+1)+".json"
with open(m,'r') as estaciones:
n=json.load(estaciones)
l=len(n['results'])
w=""
for i in range(0,l):
w= w+str(i+1)+") "+n['results'][i]['name']+"\n"
elif 5<int(tab1_state)<12:
m="estaciones"+str(int(tab1_state)+2)+".json"
with open(m,'r') as estaciones:
n=json.load(estaciones)
l=len(n['results'])
w=""
for i in range(0,l):
w= w+str(i+1)+") "+n['results'][i]['name']+"\n"
elif 11<int(tab1_state)<40:
m="estaciones"+str(int(tab1_state)+3)+".json"
with open(m,'r') as estaciones:
n=json.load(estaciones)
l=len(n['results'])
w=""
for i in range(0,l):
w= w+str(i+1)+") "+n['results'][i]['name']+"\n"
elif 39<int(tab1_state)<52:
m="estaciones"+str(int(tab1_state)+4)+".json"
with open(m,'r') as estaciones:
n=json.load(estaciones)
l=len(n['results'])
w=""
for i in range(0,l):
w= w+str(i+1)+") "+n['results'][i]['name']+"\n"
else:
w="La opción ingresada no es válida"
except ValueError:
w="La opción no es válida"
stationsr = Text(tab1,width=32,height=30,wrap=WORD,background="white")
stationsr.grid(row=0,rowspan=60,column=10, columnspan=10, sticky=E)
stationsr.insert (END,str(w))
tab1_estado.grid(row=60,column=10)
Button(tab1,text="OpciónEstado",command=stateclick).grid(row=60,column=11,columnspan=2,sticky=W)
tab1_estacion= Entry(tab1, width=3, bg="white")
tab1_estacion.grid(row=60,column=13,sticky=E)
Button(tab1,text="Estación",command=stationclick).grid(row=60,column=14,sticky=E)
def stationclick():
global tab1_station
global estaciones_visitadas
try:
if int(tab1_state)==1 or int(tab1_state)==2:
m="estaciones"+str(tab1_state)+".json"
with open(m,'r') as estaciones:
n=json.load(estaciones)
l=len(n['results'])
elif 2<int(tab1_state)<6:
m="estaciones"+str(int(tab1_state)+1)+".json"
with open(m,'r') as estaciones:
n=json.load(estaciones)
l=len(n['results'])
elif 5<int(tab1_state)<12:
m="estaciones"+str(int(tab1_state)+2)+".json"
with open(m,'r') as estaciones:
n=json.load(estaciones)
l=len(n['results'])
elif 11<int(tab1_state)<40:
m="estaciones"+str(int(tab1_state)+3)+".json"
with open(m,'r') as estaciones:
n=json.load(estaciones)
l=len(n['results'])
elif 39<int(tab1_state)<52:
m="estaciones"+str(int(tab1_state)+4)+".json"
with open(m,'r') as estaciones:
n=json.load(estaciones)
l=len(n['results'])
tab1_station=int(tab1_estacion.get())
if 0<tab1_station<26:
stationsr.delete(0.0,END)
stationsr.insert (END,"Nombre: "+n['results'][int(tab1_station)-1]['name']
+"\n"+ "-Fecha Inicial: "+n['results'][int(tab1_station)-1]['mindate']
+"\n"+ "-FechaFinal: "+n['results'][int(tab1_station)-1]['maxdate']
+"\n"+ "-Latitude: "+str(n['results'][int(tab1_station)-1]['latitude'])
+"\n"+ "-Longitud: "+str(n['results'][int(tab1_station)-1]['longitude'])
+"\n"+ "-Cobertura de Datos: "+str(n['results'][int(tab1_station)-1]["datacoverage"])
+"\n"+ "-ID: "+n['results'][int(tab1_station)-1]["id"])
q="Se visitó la estación número "+str(tab1_station)+" correspondiente al estado "+str(tab1_state)+" en "+str(datetime.datetime.now())
estaciones_visitadas.append(q)
else:
stationsr.delete(0.0,END)
stationsr.insert (END,"La opción ingresada no es válida")
except ValueError:
stationsr.delete(0.0,END)
stationsr.insert(END,"La opción ingresada no es válida")
def weatherclick():
city=entry1.get()
country=entry2.get()
weather= subprocess.Popen(["curl ","http://api.openweathermap.org/data/2.5/weather?q="+str(city)+","+str(country)+"&APPID=88e766988b7e28f66160c1bf837bbc54","-o", str(city)+'.json'])
weather.communicate()
output.delete(0.0,END)
with open(str(city)+'.json','r') as f:
datos=json.load(f)
try:
temperatura= "Temperatura: "+ str(datos['main']['temp']-273.15)+"°C"
presion="Presión: " + str(datos['main']['pressure'])+" hPa"
humedad="Humedad: "+ str(datos['main']['humidity'])+"%"
minTemp="Temperatura Mínima: " + str(datos['main']['temp_min']-273.15)+"°C"
maxTemp="Temperatura Máxima: " + str(datos['main']['temp_max']-273.15)+"°C"
wind="Viento: "+ str(datos['wind']['speed'])+" m/s"
cities.append(city)
except:
temperatura="Lo sentimos pero la ciudad que ha seleccionada no existe\n tome en cuenta que el nombre de la ciudad debe comenzar en mayúscula,\n debe estar escrito en ingles y el codigo del pais en minusculas."
presion= ""
humedad=""
minTemp=""
maxTemp= ""
wind=""
output.insert (END,str(temperatura)+"\n"+str(presion)+"\n"+str(humedad)+"\n"+ str(minTemp) +"\n"+str(maxTemp)+"\n"+str(wind))
datac.append(datos['main']['temp']-273.15)
if datos['weather'][0]['main']=="Thunderstorm":
photo1=PhotoImage(file="Thunderstorm.gif")
label=Label (tab2,image=photo1) .grid(row=3,column=15)
label2= Label (tab2,text='Clouds',fg='clouds') .grid(row=5,column=15)
elif datos['weather'][0]['main']== "Drizzle":
photo1=PhotoImage(file="Drizzle.gif")
label=Label (tab2,image=photo1) .grid(row=3,column=15)
label2= Label (tab2,text='Clouds',fg='clouds') .grid(row=5,column=15)
elif datos['weather'][0]['main']== "Rain":
photo1=PhotoImage(file="Rain.gif")
label=Label (tab2,image=photo1) .grid(row=3,column=15)
label2= Label (tab2,text='Clouds',fg='clouds') .grid(row=5,column=15)
elif datos['weather'][0]['main']== "Snow":
photo1=PhotoImage(file="Snow.gif")
label=Label (tab2,image=photo1) .grid(row=3,column=15)
label2= Label (tab2,text='Clouds',fg='clouds') .grid(row=5,column=15)
elif datos['weather'][0]['main']== "Clear":
photo1=PhotoImage(file="Clear.gif")
label=Label (tab2,image=photo1) .grid(row=3,column=15)
label2= Label (tab2,text='Clouds',fg='clouds') .grid(row=5,column=15)
elif datos['weather'][0]['main']== "Clouds":
photo1=PhotoImage(file="Clouds.gif")
label=Label (tab2,image=photo1) .grid(row=3,column=15)
label2= Label (tab2,text='Clouds',fg='clouds') .grid(row=5,column=15)
def tabla():
global estaciones_visitadas
print estaciones_visitadas
for i in range(0,len(estaciones_visitadas)):
Label(tab3,text=str(estaciones_visitadas[i]),fg="black").grid(row=i,column=0,sticky=W)
def graph():
ciudad1=str(cities[len(cities)-1])
ciudad2='m'
if len(cities)>2:
counter=1
i=2
while i<len(cities)+1:
if ciudad1==str(cities[len(cities)-i]) or ciudad2==str(cities[len(cities)-i]):
i+=1
elif ciudad2=='m':
ciudad2=str(cities[len(cities)-i])
counter +=1
if counter<4:
i+=1
else:
i=len(cities)+2
elif ciudad2 !='m':
if ciudad1==str(cities[len(cities)-i]) or ciudad2==str(cities[len(cities)-i]):
i+=1
else:
ciudad3 =str(cities[len(cities)-i])
i=len(cities)+2
indices1=[]
indices2=[]
indices3=[]
for i in range(0,len(cities)):
if ciudad1==cities[i]:
indices1.append(i)
elif ciudad2==cities[i]:
indices2.append(i)
elif ciudad3==cities[i]:
indices3.append(i)
temp1=0
temp2=0
temp3=0
for m in indices1:
temp1=temp1+datac[m]
for m in indices2:
temp2=temp2+datac[m]
for m in indices3:
temp3=temp3+datac[m]
temp1=temp1/len(indices1)
temp2=temp2/len(indices2)
temp3=temp3/len(indices3)
d = Drawing(220, 180)
bar = VerticalBarChart()
bar.x = 25
bar.y = 55
data = [[temp1,0,temp2,0,temp3]
]
bar.data = data
try:
bar.categoryAxis.categoryNames = [ciudad1, '', ciudad2, '', ciudad3,'']
bar.bars[0].fillColor = PCMYKColor(100,0,90,50,alpha=85)
d.add(bar, '')
d.save(formats=['gif'], outDir='.', fnRoot='grafica')
except:
bar.categoryAxis.categoryNames = [str(cities[len(cities)-1])]
bar.bars[0].fillColor = PCMYKColor(100,0,90,50,alpha=85)
d.add(bar, '')
d.save(formats=['gif'], outDir='.', fnRoot='grafica')
photo1=PhotoImage(file="grafica.gif")
label=Label (tab4,image=photo1) .grid(row=5,column=0)
label2= Label (tab4,text='grafica',fg="grafica") .grid(row=5,column=3)
REMOTE_SERVER = "www.google.com"
def internet_on():
try:
# see if we can resolve the host name -- tells us if there is
# a DNS listening
host = socket.gethostbyname(REMOTE_SERVER)
# connect to the host -- tells us if the host is actually
# reachable
s = socket.create_connection((host, 80), 2)
return True
except:
pass
return False
print internet_on()
#------------------------------------------Ventana de Inicio---------------------------------------------------
internet_on()
ventana=Tk()
background_image=ImageTk.PhotoImage(Image.open("cielo.jpg"))
background_label=Label(ventana,image=background_image)
background_label.place(x=0,y=0,relwidth=1,relheight=1)
ventana.title("Proyecto Cristian")
ventana.geometry('600x400')
ventana.configure(background="gray")
Label (ventana,text="Usuario", bg="gray",fg="black").grid(row=0, column=0, sticky=W)
Label (ventana,text="Password", bg="gray",fg="black").grid(row=0+2, column=0, sticky=W)
textentry1=Entry(ventana, width=20, bg="white")
textentry1.grid(row=0, column=1,sticky=W)
textentry2=Entry(ventana, width=20, bg="white", show='*')
textentry2.grid(row=0+2, column=1,sticky=W)
Button(ventana,text="Login",width=5,command=loginclick).grid(row=4,column=0,sticky=W)
Button(ventana,text="Sign Up",command=signupclick).grid(row=4,column=1,sticky=W)
ventana.mainloop()
#--------------------------------------------Ventana de Menu----------------------------------------------
if len(aviso)==1:
ventana2=Tk()
background_image=ImageTk.PhotoImage(Image.open("mar.jpg"))
background_label=Label(ventana2,image=background_image)
background_label.place(x=0,y=0,relwidth=1,relheight=1)
ventana2.title("Menu")
ventana2.geometry('800x600')
rows=0
while rows<50:
ventana2.rowconfigure(rows,weight=1)
ventana2.columnconfigure(rows,weight=1)
rows +=1
nb=ttk.Notebook(ventana2)
nb.grid(row=3,column=0,columnspan=50,rowspan=49,sticky='NESW')
Button(ventana2,text="Guardar",width=6,command=lambda : guardar(cuenta_dado,cities,datac,estaciones_visitadas)).grid(row=0,column=1,sticky=W)
Button(ventana2,text="Load", width=5, command=lambda : load()).grid(row=0, column=2,sticky=W)
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!Pestaña 1!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
tab1=ttk.Frame(nb)
nb.add(tab1,text='Estaciones\n NOAA')
Label(tab1,text="Como puedes ver todos los estados de EEUU están enlistados, puedes presionar la tecla que desees \n para poder ver las estaciones que se encuentran en un especifico estado o bien, elegir la opcion \n de enlistar todas las posibles estaciones en el país",fg="black"). grid(row=0, columnspan=3, sticky=W)
with open('estados.json','r') as lista1:
x= json.load(lista1)
for i in range (0,50,3):
Label(tab1,text = str(i+1)+" "+x['results'][i]["name"],fg="black").grid(row=1+i,column=0,sticky=W)
Label(tab1,text=str(i+2)+" "+x['results'][i+1]["name"],fg="black").grid(row=1+i,column=1,sticky=W)
Label(tab1,text= str(i+3)+" "+x['results'][i+2]["name"], fg="black").grid(row=1+i,column=2,sticky=W)
tab1_estado= Entry(tab1,width=3,bg="white")
tab1_estado.grid(row=0,column=10,sticky=W)
Button(tab1,text="Opcion Estado",command=stateclick).grid(row=0,column=11,sticky=W)
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!Pestaña 2!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
temperatura=""
presion=""
humedad=""
minTemp=""
maxTemp=""
tab2=ttk.Frame(nb)
nb.add(tab2,text='Datos por Ciudad\n OpenStreetMap')
Label (tab2,text="Ciudad:",fg="black").grid(row=0, column=0, sticky=W)
entry1=Entry(tab2, width=20, bg="white")
entry1.grid(row=0, column=1,sticky=W)
Label (tab2,text="Pais:",fg="black").grid(row=1, column=0, sticky=W)
entry2=Entry(tab2, width=20, bg="white")
entry2.grid(row=1, column=1,sticky=W)
Button(tab2,text="Buscar",width=5,command=weatherclick).grid(row=4,column=0,sticky=W)
output= Text(tab2,width=30,height=20,wrap=WORD,background="white")
output.grid(row=0,rowspan=20,column=2, columnspan=10, sticky=E)
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!Pestaña 3!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
tab3=ttk.Frame(nb)
nb.add(tab3,text='Tabla de Estaciones\n Visitadas')
Button(tab3,text="Registro Estaciones", command=tabla).grid(row=0,column=0)
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!Pestaña 4!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
tab4=ttk.Frame(nb)
nb.add(tab4,text='Grafico ciudades \n consultadas, historico')
Label(tab4,text="Una vez que se hayan visitado 3 o más ciudades puedes crear una gráfica con la temperatura promedio de las últimas tres ciudades visitadas.", fg="black").grid(row=0,column=0,sticky=W)
Button(tab4,text="Crear Gráfica", width=10, command=graph).grid(row=3,column=0)
ventana2.mainloop()
#----------------------------------------------------------------------
# api key: 88e766988b7e28f66160c1bf837bbc54
# http://api.openweathermap.org/data/2.5/weather?q=Lakewood,us&APPID=88e766988b7e28f66160c1bf837bbc54 | [
"cristianjalvarez123@gmail.com"
] | cristianjalvarez123@gmail.com |
0f6bff7af88112200164ee73a63e93548e0b7606 | 1094e533594d6fbdf4a0f605b06a1954336b52e8 | /index/views.py | 586009091d84dd75a9a807174d8ade7c1949bc90 | [] | no_license | leezhiyong08/friutday | ac424c31bc2dd54aa61e76f13b8264042b4ba741 | 16f6a25d827f64fe88a526adf3e51de543b1c2de | refs/heads/master | 2020-04-24T01:14:11.321113 | 2019-02-16T13:40:16 | 2019-02-16T13:40:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,810 | py | import json
from django.core import serializers
from django.http import HttpResponse
from django.shortcuts import render, redirect
from .forms import *
# Create your views here.
def index_views(request):
return render(request,'index.html')
# /login 对应的视图
def login_views(request):
url = '/'
if request.method == 'GET':
# get 的流程
# 判断session中是否有登录信息
if 'uid' in request.session and 'uphone' in request.session:
# session中有值,重定向回首页或原路径
print('session中有数据')
return redirect(url)
else:
# session中没有值
# 判断cookie中是否有uid和uphone
if 'uid' in request.COOKIES and 'uphone' in request.COOKIES:
# cookie 中有登录信息
# 从cookie中取出数据保存进session
uid = request.COOKIES['uid']
uphone = request.COOKIES['uphone']
request.session['uid']=uid
request.session['uphone']=uphone
# 重定向到首页或原路径
return redirect(url)
else:
# cookie 中没有登录信息
# 去往登录页面
form = LoginForm()
return render(request,'login.html',locals())
else:
# post 的流程
# 实现登录操作:取出uphone和upwd到db中判断
uphone = request.POST['uphone']
upwd = request.POST['upwd']
uList = Users.objects.filter(uphone=uphone,upwd=upwd)
# if uList:
if uphone=='13511225566' and upwd=='123456':
# 登录成功
# uid = uList[0].id
# 取出 uphone 和 uid 保存进session
uid = '01'
request.session['uid'] = uid
request.session['uphone'] = uphone
# 判断是否有记住密码,记住密码的话则将值保存进cookie
resp = redirect(url)
if 'isSaved' in request.POST:
# 记住密码,保存进cookie
expires = 60 * 60 * 24 * 366
resp.set_cookie('uid',uid,expires)
resp.set_cookie('uphone',uphone,expires)
# 重定向到首页或原路径
return resp
else:
#登录失败 : 回登录页
form = LoginForm()
errMsg = "用户名或密码不正确"
return render(request,'login.html',locals())
# /register 对应的视图
def register_views(request):
if request.method == 'GET':
return render(request,'register.html')
else:
#实现注册的功能
dic ={
"uphone":request.POST['uphone'],
"upwd":request.POST['upwd'],
"uname":request.POST['uname'],
"uemail":request.POST['uemail'],
}
#将数据插入进数据库 - 注册
Users(**dic).save()
#根据uphone的值再查询数据库
u = Users.objects.get(uphone=request.POST['uphone'])
#将用户id和uphone保存进session
request.session['uid'] = u.id
request.session['uphone'] = u.uphone
return redirect('/')
# 检查手机号码是否存在 -> /check_uphone/
def check_uphone_views(request):
if request.method == 'POST':
#接收前端传递过来的手机号码
uphone = request.POST['uphone']
uList = Users.objects.filter(uphone=uphone)
if uList:
# 如果条件为真,则表示手机号码已经存在
# 响应 status值为0,用于通知客户端手机号码已存在
# 响应 text值为 “手机号码已存在”
dic = {
"status":"0",
"text":'手机号码已存在',
}
return HttpResponse(json.dumps(dic))
else:
dic = {
"status":"1",
"text":"可以注册",
}
return HttpResponse(json.dumps(dic))
# 检查用户是否登录,如果有的话则取出uname的值
def check_login_views(request):
# 判断 session 中是否有 uid 和 uphone
if 'uid' in request.session and 'uphone' in request.session:
# 用户此时处于登录状态
# 根据 uid 获取 uname 的值
uid = request.session['uid']
user = Users.objects.get(id=uid)
#处理响应数据
dic = {
"status":'1',
'user':json.dumps(user.to_dict())
}
return HttpResponse(json.dumps(dic))
else:
# 判断cookie是否有登录信息
if 'uid' in request.COOKIES and 'uphone' in request.COOKIES:
# 从cookie中取出数据保存进session
uid = request.COOKIES['uid']
uphone = request.COOKIES['uphone']
request.session['uid']=uid
request.session['uphone']=uphone
# 根据uid查询处对应的user信息转换成字典,响应给客户端
user = Users.objects.get(id=uid)
jsonStr = json.dumps(user.to_dict())
dic = {
"status":"1",
"user":jsonStr,
}
return HttpResponse(json.dumps(dic))
else:
# session和cookie中都没有登录信息
dic = {
"status":0,
'text':'用户尚未登录'
}
if request.method == 'POST':
tmp_url = '/'
uphone = request.POST['uphone']
tmp_resp = redirect(tmp_url)
tmp_expires = 60 * 60 * 24 * 366
tmp_resp.set_cookie('uphone', uphone, tmp_expires)
return redirect(tmp_url)
return HttpResponse(json.dumps(dic))
# 退出登录
# 清除 session 和 cookie 中的数据
# 原路返回
def logout_views(request):
#获取请求源地址,如果没有,则返回首页 /
url = request.META.get('HTTP_REFERER','/')
resp = redirect(url)
# 判断 session 中是否有登录信息
if 'uid' in request.session and 'uphone' in request.session:
del request.session['uid']
del request.session['uphone']
if 'uid' in request.COOKIES and 'uphone' in request.COOKIES:
resp.delete_cookie('uid')
resp.delete_cookie('uphone')
return resp
def type_goods_views(request):
all_list=[]
types=GoodsType.objects.all()
for type in types:
type_json=json.dumps(type.to_dic())
g_list=type.goods_set.all()
g_list_json=serializers.serialize('json',g_list)
dic={
'type':type_json,
'goods':g_list_json,
}
all_list.append(dic)
return HttpResponse(json.dumps(all_list))
| [
"lvze@tedu.cn"
] | lvze@tedu.cn |
557c96972141d1a75b7f45e4289a642a6390440e | 08dfaf714830a6310742dcd50848790d595e838e | /位运算/code_01_EvenTimesOddTimes.py | c16881e90ab21aa241caa096e317d2dd06fa949c | [] | no_license | Tokyo113/leetcode_python | d9e0fb96a76efaadcec7aad08f5ef542d898d434 | e86b3fb26aef1cf63727e3e5c9fd4ddc9bedb7f1 | refs/heads/master | 2020-08-10T15:36:10.364714 | 2020-04-13T08:28:53 | 2020-04-13T08:28:53 | 214,369,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 998 | py | #coding:utf-8
'''
@Time: 2019/12/4 21:54
@author: Tokyo
@file: code_01_EvenTimesOddTimes.py
@desc:
1.一个数组中有一种数出现了奇数次,其他数都出现了偶数次,怎么找到这一个数
2.一个数组中有两种数出现了奇数次,其他数都出现了偶数次,怎么找到这两个数
'''
def findOddTimes1(arr):
eor = 0
for i in arr:
eor = eor ^ i
return eor
def findOddTimes2(arr):
eor = 0
for i in arr:
eor = eor ^ i
# eor = a ^ b
# 取得eor最右侧的1,eor肯定不为0,存在一位为1
# 这两个数肯定在这一位不一样,一个为1,一个为0
rightone = eor & (~eor+1)
eor1 = 0
for i in arr:
if (i&rightone) == 0:
eor1 = eor1 ^ i
return eor1, eor1^eor
if __name__ == '__main__':
a = [1,2,3,2,1,2,4,4,3,2,5]
print(findOddTimes1(a))
b = [4, 3, 4, 2, 2, 1, 4, 1, 1, 1, 3, 3, 1, 1, 1, 4, 2, 2]
print(findOddTimes2(b))
print(find2(b)) | [
"21810179@zju.edu.cn"
] | 21810179@zju.edu.cn |
35f47b09947f8ea4d901c650b0b88fab65047c57 | fb9b0f9c6fb593dfb8e23ee04454f8c0a37cb15d | /other/ppmatrix.py | a84ab5fee6b75a0fd18b3583feba864d84ed27df | [] | no_license | dmishin/dmishin-pyscript | 624f1efda94916780bf25c6bd554c75f43936170 | 494433c26daf826f4b914f81ceaa69dc2f35c350 | refs/heads/master | 2021-01-02T22:52:09.917804 | 2010-08-28T20:11:11 | 2010-08-28T20:11:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,710 | py | from itertools import imap, count
def mul(m1,m2, lazy=False):
if m1.width!=m2.height:
raise ValueError, "Matrix sizes do not match"
k_range = range(m1.width)
def sumat(i,j):
return sum([m1[i,k]*m2[k,j] for k in k_range])
if not lazy:
return mtx(m1.height, m2.width, sumat)
else:
return vmtx(m1.height, m2.width, sumat)
def trace(m):
return sum(diag(m))
def diag(m):
h,w = m.size()
return [m[i,i] for i in range(min(h,w))]
def inv(m, one = 1.0, zero = 0.0):
m = copy(m)
h,w = m.size()
if h!=w:
raise ValueError, "Matrix must be square"
im = copy(eye(h))
def max_row_idx(col):
maxidx = col
maxval = abs(m.data[col][col])
for row in range(col+1,h):
v = abs(m.data[row][col])
if v>maxval:
maxval = v
maxidx = row
return maxidx
for i in range(h): #column
#find max in column
row_max = max_row_idx(i)
## print "==="
## print m
## print "col", i, "max row", row_max
m.line_swap(i, row_max)
im.line_swap(i, row_max)
## print "swapped"
## print m
#normalize row
A_ii = m[i,i]
if not A_ii:
raise ZeroDivisionError, "Matrix can not be inverted"
m.line_iscale(i, A_ii)
im.line_iscale(i, A_ii)
#make zeros
for j in range(h):
if j!=i:
im.line_combine(i, -m[j,i], j)
m.line_combine(i, -m[j,i], j)
## print "Zeroized"
## print m
## print "=============="
return im
def add(*mts):
def sumat(i,j):
return sum([m[i,j] for m in mts])
sz=mts[0].size()
for m in mts:
if m.size() != sz:
raise ValueError, "matrix sizes not match"
return mtx(sz[0],sz[1], sumat)
def copy(m):
h,w = m.size()
return mtx(h,w, m.get)
def const_values(v):
def val(i,j):
return v
def none_values(i,j):
return None
class base_mtx:
def __init__(self):
pass
def issq(self):
w,h=self.size()
return w==h
def get(self, i,j):
return self[i,j]
def __str__(self):
h,w = self.size()
return "\n".join(["["+", ".join([str(self[i,j]) for j in range(w)])+"]" for i in range(h)])
def __repr__(self):
h,w = self.size()
return "mtx(%d,%d,%s)"%(h,w,\
"["+",".join(\
["["+",".join([repr(self[i,j]) for j in range(w)])+"]" for i in range(h)]\
)+"]")
def __add__(self, m):
if isinstance(m, base_mtx):
return add(self, m)
else:
h,w = self.size()
return add(self, ones(h,w,m))
def __radd__(self, m):
return self.__add__(m)
def __rmul__(self, m):
if isinstance(m, base_mtx):
return mul(m,self)
else:
h,w = self.size()
return mtx(h,w,lambda i,j: self[i,j]*m)
def col(self,i, lazy = False):
h,w = self.size()
if not lazy:
return [self[j,i] for j in range(h)]
else:
return _lazy_list(lambda j: self[j,i], h)
class _lazy_list:
"List, based on function"
def __init__(self, func, length = None):
self.func = func
self.length = length
def __getitem__(self, idx):
return self.func(idx)
def __iter__(self):
return imap(lambda idx:self.func(idx), count())
def __len__(self):
return self.length
def mxmap(func, m, lazy = False):
mtx_type = mtx if not lazy else vmtx
h,w = m.size()
return mtx_type(h,w,lambda i,j:func(m[i,j]))
class mtx (base_mtx):
    """Dense matrix backed by a list of row lists.

    The constructor signature is ``(w, h, ...)`` but it builds ``h`` rows of
    ``w`` values and size() reports (height, width); callers throughout this
    module pass (h, w) -- the existing convention is preserved.

    Fixes: Python 2-only tuple-parameter unpacking in __getitem__/__setitem__,
    ``raise X, msg`` syntax and ``xrange`` replaced with forms valid on both
    Python 2 and 3; dead shadowed locals removed and the incorrect
    line_combine docstring corrected.
    """
    def __init__(self, w, h, values=none_values):
        # ``values`` may be a callback f(i, j) or a prebuilt list of rows.
        if hasattr(values, "__call__"):
            self.data = [[values(i, j) for j in range(w)] for i in range(h)]
        elif hasattr(values, "__getitem__"):
            self.data = values
        else:
            raise ValueError("Values must be list or function")
        self.width = w
        self.height = h
    def __getitem__(self, key):
        i, j = key  # indexed as m[i, j]
        return self.data[i][j]
    def __setitem__(self, key, v):
        i, j = key
        self.data[i][j] = v
    def size(self):
        return self.height, self.width
    def __mul__(self, m):
        if isinstance(m, base_mtx):
            return mul(self, m)
        # scalar multiplication: scale every element
        h, w = self.size()
        return mtx(h, w, lambda i, j: self[i, j] * m)
    def line_combine(self, i, a, j):
        "Replace row j with row_i * a + row_j (elementwise)."
        self.data[j] = [Ai * a + Aj for Ai, Aj in zip(self.data[i], self.data[j])]
    def line_scale(self, i, k):
        # row_i *= k
        self.data[i] = [x * k for x in self.data[i]]
    def line_iscale(self, i, k):
        # row_i /= k (inverse scale)
        self.data[i] = [x / k for x in self.data[i]]
    def line_swap(self, i, j):
        if i == j:
            return
        self.data[i], self.data[j] = self.data[j], self.data[i]
def eye(h, w=None, one=1, zero=0):
    """Identity-like lazy matrix: ``one`` on the diagonal, ``zero`` elsewhere."""
    if w is None:  # idiom fix: compare to None with ``is``, not ``==``
        w = h
    return vmtx(h, w, lambda i, j: one if i == j else zero)
def zeros(h, w=None, zero=0):
    """Lazy matrix with every cell equal to ``zero``; square when w omitted."""
    if w is None:  # idiom fix: compare to None with ``is``, not ``==``
        w = h
    return vmtx(h, w, lambda i, j: zero)
def ones(h, w=None, one=1):
    """Lazy matrix filled with ``one``; delegates to zeros with a fill value."""
    return zeros(h, w=w, zero=one)
class vmtx(base_mtx):
    """Virtual (lazy) matrix: elements are computed on access by ``values``.

    Fix: Python 2-only tuple-parameter unpacking in __getitem__ replaced
    with explicit unpacking, valid on both Python 2 and 3.
    """
    def __init__(self, w, h, values=none_values):
        self.values = values
        self.width = w
        self.height = h
    def __getitem__(self, key):
        i, j = key  # indexed as m[i, j]
        return self.values(i, j)
    def size(self):
        return self.height, self.width
class transposed(base_mtx):
    """Transposed view of another matrix (no copy).

    Fix: element access must swap the index pair. Previously __getitem__ and
    __setitem__ forwarded (i, j) unchanged, so the view reported a reversed
    size() but returned untransposed elements.
    """
    def __init__(self, m):
        self.original = m
    def __getitem__(self, key):
        i, j = key
        return self.original[j, i]
    def __setitem__(self, key, v):
        i, j = key
        self.original[j, i] = v
    def size(self):
        # (height, width) of the view is the original's reversed
        return self.original.size()[::-1]
class minor(base_mtx):
    """View of selected rows/columns of another matrix (no copy).

    Fixes: size() referenced the bare names ``rows``/``cols`` (NameError at
    runtime) instead of the instance attributes; Python 2-only
    tuple-parameter unpacking replaced with explicit unpacking.
    """
    def __init__(self, m, cols, rows):
        self.cols = cols  # column indices of the original to expose
        self.rows = rows  # row indices of the original to expose
        self.orig = m
    def size(self):
        return len(self.rows), len(self.cols)
    def __getitem__(self, key):
        i, j = key
        return self.orig[self.rows[i], self.cols[j]]
    def __setitem__(self, key, v):
        i, j = key
        self.orig[self.rows[i], self.cols[j]] = v
def det(m):
    """Determinant via Laplace (cofactor) expansion along the first row.

    Fix: previously only the 1x1 case was handled and larger matrices fell
    through, implicitly returning None. Works for any element type
    supporting +, unary -, and * (ints, floats, rationals, ...).
    """
    h, w = m.size()
    if w != h:
        raise ValueError("matrix must be square")
    # snapshot values into plain row lists so recursion is cheap and
    # independent of the matrix object's access semantics
    rows = [[m[i, j] for j in range(w)] for i in range(h)]
    return _det_rows(rows)

def _det_rows(rows):
    "Determinant of a list-of-rows via recursive cofactor expansion."
    n = len(rows)
    if n == 1:
        return rows[0][0]
    total = None
    for j in range(n):
        # submatrix: drop row 0 and column j
        sub = [r[:j] + r[j + 1:] for r in rows[1:]]
        term = rows[0][j] * _det_rows(sub)
        if j % 2:  # alternate cofactor signs
            term = -term
        total = term if total is None else total + term
    return total
#class hjoin(base_mtx):
#def __init__(self, *matrices):
#self.mts = matrices
#self.hrefs = list() #references to the columns
#if len(mts)==0:
#self.height = 0
#self.width = 0
#else:
#h,w=matrices[0].size()
#for m in matrices:
#h1, w1 = m.size()
#if h1 != h:
#raise ValueError, "Can not hjoin matrices: sizes are wrong"
#w+=w1
#self.height = h
#self.width = w
#def size(self):
#return self.height, self.width
#def __getitem__(self, (i,j)):
#for m in self.matrices:
#w,h = m.size()
#if j<w:
#return m[i,j]
#j -= w
#raise IndexError, (i, "Index too big")
#def __setitem__(self, (i,j), v):
#for m in self.matrices:
#w,h = m.size()
#if j<w:
#m[i,j] = v
#return
#j -= w
#raise IndexError, (i, "Index too big")
# Demo: invert an upper-triangular 6x6 matrix of rationals.
# Fixes: Python 2 ``print`` statement replaced with the print() call, and the
# demo is guarded under __main__ so importing this module no longer runs it.
if __name__ == "__main__":
    from rational import rat
    m = mtx(6, 6, values=lambda i, j: rat(i + 1, j + 2) if i <= j else rat(0))
    print(inv(m, zero=rat(0), one=rat(1)))
| [
"shintyakov@gmail.com"
] | shintyakov@gmail.com |
5b9ed6ed0530e8623a9bbac53c115fadbaf8fb92 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_recesses.py | a34a6d1f64dbe47f008faa9c0c762b260b8b828f | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py |
# class header
class _RECESSES():
def __init__(self,):
self.name = "RECESSES"
self.definitions = recess
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['recess']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
3838634472d2c87a52af758b035cb3239c9a2c41 | 2414312a385f668efddefa8b690f94ddf81f3abd | /SD_DATA/PRISM/PRISM_regrid-0.25_NRT-vars-etopo_US.py | 20e2429d87bd98d0de5249efc19d1e0bc969ce3d | [] | no_license | ziwangdeng/DL_downscaling | ab04453c0ceb8fffee8f133b468ed823040c6626 | 563f3813a5e657d83cf99cd213d96162954fcb37 | refs/heads/master | 2022-07-19T08:08:26.320651 | 2020-05-13T00:39:12 | 2020-05-13T00:39:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,868 | py |
'''
Downscaling pre-processing
Subsetting and re-gridding near-real-time PRISM.
TMAX/TMIN/PCT separate hdfs
'''
# general tools
import sys
from glob import glob
# data tools
import h5py
import numpy as np
import netCDF4 as nc
# custom tools
sys.path.insert(0, '/glade/u/home/ksha/WORKSPACE/utils/')
sys.path.insert(0, '/glade/u/home/ksha/WORKSPACE/DL_downscaling/')
import data_utils as du
from namelist import *
# macros
# interp_method = 'cubic'
# Variables to process; each has its own "PRISM_<VAR>_2015_2020.hdf" file.
VAR_list = ['PCT', 'TMIN', 'TMAX', 'TMEAN']
# import HR lon/lat from a single file (any variable's file carries the grid)
with h5py.File(PRISM_dir+'PRISM_PCT_2015_2020.hdf', 'r') as hdf_io:
    land_mask = hdf_io['PRISM_PCT'][0, subset_ind[0]:subset_ind[1], subset_ind[2]:subset_ind[3]]
    lon_4km = hdf_io['lon'][subset_ind[0]:subset_ind[1], subset_ind[2]:subset_ind[3]]
    lat_4km = hdf_io['lat'][subset_ind[0]:subset_ind[1], subset_ind[2]:subset_ind[3]]
# ocean/no-data cells are NaN in PRISM; turn them into a boolean mask
land_mask = np.isnan(land_mask)
# defining LR lon/lat: 0.25-degree grid over the western US subset
dx = 0.25; dy = 0.25
latlim = [24, 49]; lonlim = [-125, -100.25]
lon_025, lat_025 = np.meshgrid(np.arange(lonlim[0], lonlim[1], dx), np.arange(latlim[0], latlim[1], dy))
print('lon_4km.shape:{}; lon_025.shape:{}'.format(lon_4km.shape, lon_025.shape))
# ETOPO interp: elevation at 4 km, at 0.25 deg, and 0.25 deg re-gridded back to 4 km
print('Process ETOPO')
with nc.Dataset(BACKUP_dir+'ETOPO1_Ice_g_gmt4.grd') as nc_obj:
    etopo_x = nc_obj.variables['x'][2000:7000] # subsetting north america
    etopo_y = nc_obj.variables['y'][6000:]
    etopo_z = nc_obj.variables['z'][6000:, 2000:7000]
etopo_lon, etopo_lat = np.meshgrid(etopo_x, etopo_y)
# coarse-graining ETOPO1 (interp_method comes from namelist via star-import)
etopo_4km = du.interp2d_wraper(etopo_lon, etopo_lat, etopo_z, lon_4km, lat_4km, method=interp_method)
etopo_025 = du.interp2d_wraper(etopo_lon, etopo_lat, etopo_z, lon_025, lat_025, method=interp_method)
etopo_regrid = du.interp2d_wraper(lon_025, lat_025, etopo_025, lon_4km, lat_4km, method=interp_method)
# =========================== #
# dictionaries keyed by variable name; converted to tuples for save_hdf5 below
dict_4km = {}
dict_025 = {}
dict_regrid = {}
# hdf5 labels (one per saved field, kept in the same order as the tuples)
label_4km = []
label_025 = []
label_regrid = []
for VAR in VAR_list:
    print('===== Process {} ===== '.format(VAR))
    # load prism: full-domain daily fields plus the date-number axis
    with h5py.File(PRISM_dir+'PRISM_{}_2015_2020.hdf'.format(VAR), 'r') as hdf_io:
        prism = hdf_io['PRISM_{}'.format(VAR)][...]
        dtnum = hdf_io['datenum'][...]
    # PRISM subset to the study domain
    prism_4km = prism[:, subset_ind[0]:subset_ind[1], subset_ind[2]:subset_ind[3]]
    prism_025 = np.empty((len(dtnum),)+lon_025.shape)
    prism_regrid = np.empty(prism_4km.shape) #
    # loop over available dates (2015-2020)
    for i in range(len(dtnum)):
        # coarse-graining PRISM: 4 km -> 0.25 deg, then back to 4 km
        temp_025 = du.interp2d_wraper(lon_4km, lat_4km, prism_4km[i, ...], lon_025, lat_025, method=interp_method)
        temp_regrid = du.interp2d_wraper(lon_025, lat_025, temp_025, lon_4km, lat_4km, method=interp_method)
        # re-apply the land mask: interpolation fills ocean cells with values
        temp_regrid[land_mask] = np.nan
        prism_025[i, ...] = temp_025
        prism_regrid[i, ...] = temp_regrid
    # collecting fields
    dict_4km[VAR] = prism_4km
    dict_025[VAR] = prism_025
    dict_regrid[VAR] = prism_regrid
    # collecting label
    label_4km.append(VAR+'_4km')
    label_025.append(VAR+'_025')
    label_regrid.append(VAR+'_REGRID')
# dictionary to tuple (insertion order matches VAR_list, so labels line up)
tuple_4km = tuple(dict_4km.values())
tuple_025 = tuple(dict_025.values())
tuple_regrid = tuple(dict_regrid.values())
tuple_etopo = (etopo_4km, etopo_025, etopo_regrid)
tuple_grids = (lon_025, lat_025, lon_4km, lat_4km, land_mask)
# mark labels
label_etopo = ['etopo_4km', 'etopo_025', 'etopo_regrid']
label_grids = ['lon_025', 'lat_025', 'lon_4km', 'lat_4km', 'land_mask']
# save hdf: all fields and their labels into a single output file
tuple_save = tuple_4km + tuple_025 + tuple_regrid + tuple_etopo + tuple_grids
label_save = label_4km + label_025 + label_regrid + label_etopo + label_grids
du.save_hdf5(tuple_save, label_save, out_dir=PRISM_dir, filename='PRISM_regrid_2015_2020.hdf')
| [
"yingkaisha@gmail.com"
] | yingkaisha@gmail.com |
6b1337a8db31e35ab1f588c870e7418838a2eb92 | 3d45daee8b81777a25d54456659d73d58b9ce81e | /patient_main.py | b8d8fe64df98631c7f89f814e55cd4c008db2305 | [] | no_license | benrprince/ref-range-test-patients | 480016f78ceda9a02d203db218675b72fd49f426 | 9757a6fc38c6596b24effa73d6207d39e7c643db | refs/heads/main | 2023-06-23T18:36:50.883701 | 2021-07-02T12:44:12 | 2021-07-02T12:44:12 | 308,037,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,165 | py | # Author: Ben Prince
# Version: 1.2
# Description: Used to figure out the number of test patients needed
# based on the sex and age ranges given in the DCW
import patient_sort as ps
import xlwt
import openpyxl
# TODO: Update xlwt to use openpyxl....currently works, but pulling in redundant library
def parse_patients(filename, overlap):
    """Split the rows of the workbook's first sheet into three lists by sex.

    Each data row becomes ``[sex, int(col2), int(col3)]`` (sex, age range
    bounds per the DCW). Row 1 is skipped as a header row. ``overlap`` is
    accepted for interface compatibility but is not used here.

    Returns: (male_list, female_list, undifferentiated_list)
    """
    m_list = []
    f_list = []
    u_list = []
    # open excel file and get the first sheet
    xl_workbook = openpyxl.open(filename)
    sheet = xl_workbook.worksheets[0]
    # openpyxl's max_row is the 1-based index of the LAST data row, so the
    # upper bound must be max_row + 1 -- the original range(2, max_row)
    # silently dropped the final row.
    for i in range(2, sheet.max_row + 1):
        sex = str(sheet.cell(i, 1).value)
        record = [sex, int(sheet.cell(i, 2).value), int(sheet.cell(i, 3).value)]
        if sex == 'Male':
            m_list.append(record)
        elif sex == 'Female':
            f_list.append(record)
        else:
            # anything not explicitly Male/Female is undifferentiated
            u_list.append(record)
    return m_list, f_list, u_list
def get_patients_wb(filename):
    """Build an xlwt workbook of test patients (one Sex/Age pair per row).

    Runs the patient_sort algorithm on each sex group parsed from
    ``filename`` and writes the combined results below a bold header row.

    Returns: the formatted xlwt workbook.
    """
    # minutes for 1 week. This can change
    overlap = 10080
    # split the age/sex lines out of the workbook
    m_list, f_list, u_list = parse_patients(filename, overlap)
    # Write to a new workbook
    wb = xlwt.Workbook()
    patients = wb.add_sheet('Patients')
    # Set up Doc: bold header row at row 0
    patients.write(0, 0, 'Sex', xlwt.Style.easyxf("font: bold on"))
    patients.write(0, 1, 'Age', xlwt.Style.easyxf("font: bold on"))
    # Expand each group into concrete test patients
    m_list = ps.test_patient_list(m_list, overlap)
    f_list = ps.test_patient_list(f_list, overlap)
    u_list = ps.test_patient_list(u_list, overlap)
    # Write every record on consecutive rows starting below the header.
    # Fix: the original per-group index arithmetic (range(1, m_len) with
    # m_list[i-1]) dropped the last male record while the female and
    # undifferentiated loops were complete -- a single running row counter
    # removes the off-by-one entirely.
    row = 1
    for record in m_list + f_list + u_list:
        patients.write(row, 0, record[0])  # sex
        patients.write(row, 1, record[1])  # age
        row += 1
    return wb
| [
"noreply@github.com"
] | noreply@github.com |
24e12b193bbe94063956d9ed76968f0ee0da71b5 | e22b6598a03eb5f4c875ecdffee02a3e5b4ad717 | /search.py | 4617d453572a8b0ba24ac58771fcdd418d43a8e3 | [] | no_license | JustinRohweller/AIPROJ1 | 62fac6121aff65fb72c27d3d0caa4c256a8bbac5 | f67c1b86bdd98df49a9811392668dc9322c6c72c | refs/heads/master | 2021-03-19T12:28:14.987717 | 2017-10-04T19:38:04 | 2017-10-04T19:38:04 | 105,118,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,383 | py | # search.py
# ---------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
# Justin Rohweller Pac-man project 1
# I used the internet to figure out how to delete the last item from my array:
# newPathGuide = newPathGuide[:-1]
"""
In search.py, you will implement generic search algorithms which are called by
Pacman agents (in searchAgents.py).
"""
import util
class SearchProblem:
    """
    This class outlines the structure of a search problem, but doesn't implement
    any of the methods (in object-oriented terminology: an abstract class).

    You do not need to change anything in this class, ever.
    """

    def getStartState(self):
        """
        Returns the start state for the search problem.
        """
        # abstract -- concrete problems (e.g. PositionSearchProblem) override this
        util.raiseNotDefined()

    def isGoalState(self, state):
        """
        state: Search state

        Returns True if and only if the state is a valid goal state.
        """
        util.raiseNotDefined()

    def getSuccessors(self, state):
        """
        state: Search state

        For a given state, this should return a list of triples, (successor,
        action, stepCost), where 'successor' is a successor to the current
        state, 'action' is the action required to get there, and 'stepCost' is
        the incremental cost of expanding to that successor.
        """
        util.raiseNotDefined()

    def getCostOfActions(self, actions):
        """
        actions: A list of actions to take

        This method returns the total cost of a particular sequence of actions.
        The sequence must be composed of legal moves.
        """
        util.raiseNotDefined()
def tinyMazeSearch(problem):
    """
    Returns a sequence of moves that solves tinyMaze. For any other maze, the
    sequence of moves will be incorrect, so only use this for tinyMaze.
    """
    from game import Directions
    south, west = Directions.SOUTH, Directions.WEST
    # hard-coded solution path for the tinyMaze layout
    return [south, south, west, south, west, west, south, west]
def depthFirstSearch(problem):
    """
    Search the deepest nodes in the search tree first (graph search).

    Returns a list of actions reaching a goal state, assembled from the
    action strings recorded alongside each fringe entry; returns
    Directions.STOP when the start is already a goal, and ['failure'] when
    the fringe empties without reaching one.
    """
    from game import Directions
    from util import Stack

    # translate successor action strings into the canonical constants
    action_map = {
        "North": Directions.NORTH,
        "South": Directions.SOUTH,
        "East": Directions.EAST,
        "West": Directions.WEST,
    }

    start = problem.getStartState()
    if problem.isGoalState(start):
        return Directions.STOP

    fringe = Stack()
    fringe.push([[start, -1], []])  # [[state, placeholder], action-string path]
    closed = set()

    while True:
        if fringe.isEmpty():
            return ['failure']
        node = fringe.pop()
        state, path = node[0][0], node[1]
        # goal test on pop, before the closed-set check (same order as before)
        if problem.isGoalState(state):
            return [action_map[a] for a in path if a in action_map]
        if state not in closed:
            closed.add(state)
            for successor in problem.getSuccessors(state):
                # child's path = parent's path plus the action just taken
                fringe.push([successor, path + [successor[1]]])
def breadthFirstSearch(problem):
    """
    Search the shallowest nodes in the search tree first (graph search).

    Identical structure to depthFirstSearch, but a FIFO queue makes the
    traversal breadth-first.
    """
    from game import Directions
    from util import Queue

    action_map = {
        "North": Directions.NORTH,
        "South": Directions.SOUTH,
        "East": Directions.EAST,
        "West": Directions.WEST,
    }

    start = problem.getStartState()
    if problem.isGoalState(start):
        return Directions.STOP

    fringe = Queue()
    fringe.push([[start, -1], []])  # [[state, placeholder], action-string path]
    closed = set()

    while True:
        if fringe.isEmpty():
            return ['failure']
        node = fringe.pop()
        state, path = node[0][0], node[1]
        if problem.isGoalState(state):
            return [action_map[a] for a in path if a in action_map]
        if state not in closed:
            closed.add(state)
            for successor in problem.getSuccessors(state):
                fringe.push([successor, path + [successor[1]]])
def uniformCostSearch(problem):
    """
    Search the node of least total path cost first (Dijkstra-style).

    Fix: the fringe priority used to be the *single step* cost of the last
    edge (successor[2]); uniform-cost search must order the fringe by the
    cumulative path cost g(n) from the start state, so the cost is now
    accumulated along each node's path.
    """
    from game import Directions
    from util import PriorityQueue

    action_map = {
        "North": Directions.NORTH,
        "South": Directions.SOUTH,
        "East": Directions.EAST,
        "West": Directions.WEST,
    }

    start = problem.getStartState()
    if problem.isGoalState(start):
        return Directions.STOP

    fringe = PriorityQueue()
    fringe.push((start, [], 0), 0)  # (state, action-string path, g)
    closed = set()

    while True:
        if fringe.isEmpty():
            return ['failure']
        state, path, g = fringe.pop()
        # goal test on pop guarantees optimality for uniform-cost search
        if problem.isGoalState(state):
            return [action_map[a] for a in path if a in action_map]
        if state not in closed:
            closed.add(state)
            for succ_state, action, step_cost in problem.getSuccessors(state):
                child_g = g + step_cost  # cumulative cost, not just the step
                fringe.push((succ_state, path + [action], child_g), child_g)
def nullHeuristic(state, problem=None):
    """
    A heuristic function estimates the cost from the current state to the nearest
    goal in the provided SearchProblem. This trivial heuristic always returns 0,
    which makes A* behave like uniform-cost search.
    """
    return 0
def aStarSearch(problem, heuristic=nullHeuristic):
    """
    Search the node with the lowest f(n) = g(n) + h(n) first.

    Fix: the fringe priority used to be the last edge's *step* cost plus the
    heuristic; A* must use the cumulative path cost g(n) plus h(n), so the
    cost is now accumulated along each node's path.
    """
    from game import Directions
    from util import PriorityQueue

    action_map = {
        "North": Directions.NORTH,
        "South": Directions.SOUTH,
        "East": Directions.EAST,
        "West": Directions.WEST,
    }

    start = problem.getStartState()
    if problem.isGoalState(start):
        return Directions.STOP

    fringe = PriorityQueue()
    fringe.push((start, [], 0), heuristic(start, problem))  # (state, path, g)
    closed = set()

    while True:
        if fringe.isEmpty():
            return ['failure']
        state, path, g = fringe.pop()
        if problem.isGoalState(state):
            return [action_map[a] for a in path if a in action_map]
        if state not in closed:
            closed.add(state)
            for succ_state, action, step_cost in problem.getSuccessors(state):
                child_g = g + step_cost  # cumulative path cost g(n)
                fringe.push((succ_state, path + [action], child_g),
                            child_g + heuristic(succ_state, problem))
# Abbreviations -- short aliases used by the autograder and command line
bfs = breadthFirstSearch
dfs = depthFirstSearch
astar = aStarSearch
ucs = uniformCostSearch
| [
"jrohweller13@apu.edu"
] | jrohweller13@apu.edu |
3a4aadc1e308d89e263727e617f4f157e27c8146 | 0ffe9d5c109d00cd9ca5eb25de3b0c6d963eb8d8 | /venv/bin/easy_install-3.8 | b7e3356679aae19589e816bbbe25416fbeb0e1d9 | [] | no_license | Brunomleguizamon/Flask_ToDoList | 9b6adb7236cce8cdfc00c4f132ddade19ed5f9cc | 4f0dbb5f343adc8559a19058867e1953647f488d | refs/heads/main | 2023-08-25T20:36:48.985344 | 2021-10-20T11:04:27 | 2021-10-20T11:04:27 | 418,869,594 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 298 | 8 | #!/Users/Bmleguizamon/Desktop/workspace/Flask_Projects/Flask_ToDoList/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
# Auto-generated console-script entry point (setuptools): normalize argv[0]
# by stripping any "-script.pyw"/".exe" suffix, then delegate to
# easy_install's main().
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"brunomleguizamon@gmail.com"
] | brunomleguizamon@gmail.com |
980b466c28c5040171706e805a75717fbb69f66d | ed7342bcfd051d5280c444f5a625fac507ef9b53 | /demo/basics/sum_of_numbers_v2.py | 724f67c49fef9989060ad053d4ae302ff4759cd0 | [] | no_license | srikanthpragada/PYTHON_19_MAR_2021 | 55f86289e7d6be5398c18ad9f52bfd4d81563827 | 20cd95481c1fc4c156d1fed01e29cb3b09b03333 | refs/heads/master | 2023-04-06T02:37:52.657864 | 2021-05-05T03:01:31 | 2021-05-05T03:01:31 | 350,551,106 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 229 | py | # Take numbers until 0 is given and display sum of numbers
# Read integers from the user until 0 is entered, then report their sum.
total = 0
while (num := int(input("Enter a number [0 to stop] :"))) != 0:
    total += num
print(f"Total = {total}")
"srikanthpragada@gmail.com"
] | srikanthpragada@gmail.com |
76353eeb5636f6bbec22d1117dfa467ba584418d | 4ea26bf3ec34b8dc6e495372e0de143541b09ccb | /inventory/models.py | efce5773ca7427baffe05f36c84ef631164c0372 | [] | no_license | mahesh2019/inventoryms | 232e4bce9c5579c777d5e1cd45e2b775e28b84c0 | 67d104ec6f612a43262bf4e4dd32fce7fb6a36a3 | refs/heads/master | 2020-04-12T09:52:14.263635 | 2018-12-19T09:53:30 | 2018-12-19T09:53:30 | 162,411,118 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 601 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from decimal import Decimal
class Product(models.Model):
    # Inventory item record.
    # NOTE(review): field name 'cetagory' is a misspelling of 'category' --
    # kept as-is because renaming a Django model field changes the database
    # schema (requires a migration) and breaks any code referencing it.
    name = models.CharField(max_length=200, blank=True, null=True)
    cetagory = models.CharField(max_length=200, blank=True, null=True)
    supplier = models.CharField(max_length=200, blank=True, null=True)
    unit_price = models.DecimalField(max_digits=20, decimal_places=4, default=Decimal('0.0000'))
    description = models.TextField(blank=True, null=True)
    def __str__(self):
        # e.g. "Id:3 Name:Widget"
        return 'Id:{0} Name:{1}'.format(self.id, self.name)
| [
"mahesh281420@gmail.com"
] | mahesh281420@gmail.com |
21cb53ad9c60466e29851ca17b85630b65cdb096 | 0e70494cd1fb0958bcbf3c1dfafb008398defbb0 | /ros2doctor/ros2doctor/verb/hello.py | e023960d9228e7ddad02e4485cfc4c5c1fd9a088 | [
"Apache-2.0"
] | permissive | wayneparrott/ros2cli | 2bdbd62dc2c0b7468915dc12a8447aea77ed4524 | e9e89e037be79fd06a34399db18ef7ed79d23ffa | refs/heads/master | 2020-12-22T21:01:53.837774 | 2020-04-28T09:00:26 | 2020-04-28T09:08:31 | 236,930,967 | 1 | 0 | Apache-2.0 | 2020-01-29T07:57:06 | 2020-01-29T07:57:05 | null | UTF-8 | Python | false | false | 8,786 | py | # Copyright 2019 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentTypeError
import os
import socket
import struct
import threading
import time
import rclpy
from rclpy.executors import SingleThreadedExecutor
from rclpy.node import Node
from ros2doctor.verb import VerbExtension
from std_msgs.msg import String
DEFAULT_GROUP = '225.0.0.1'
DEFAULT_PORT = 49150
def positive_int(string: str) -> int:
    """argparse type-checker: parse *string* as a strictly positive integer."""
    try:
        parsed = int(string)
    except ValueError:
        parsed = -1  # force the failure branch below for non-numeric input
    if parsed > 0:
        return parsed
    raise ArgumentTypeError('value must be a positive integer')
class HelloVerb(VerbExtension):
    """
    Check network connectivity between multiple hosts.

    This command can be invoked on multiple hosts to confirm that they can talk to each other
    by using talker/listener, multicast send/receive to check topic discovering and
    UDP communication.
    This command outputs a summary table of msgs statistics at a custom period(s).
    """

    def add_arguments(self, parser, cli_name):
        """Register this verb's CLI options on *parser*."""
        parser.add_argument(
            '-t', '--topic', nargs='?', default='/canyouhearme',
            help="Name of ROS topic to publish to (default: '/canyouhearme')")
        parser.add_argument(
            '-ep', '--emit-period', metavar='N', type=float, default=0.1,
            help='Time period to publish/send one message (default: 0.1s)')
        parser.add_argument(
            '-pp', '--print-period', metavar='N', type=float, default=1.0,
            help='Time period to print summary table (default: 1.0s)')
        parser.add_argument(
            '--ttl', type=positive_int,
            help='TTL for multicast send (default: None)')
        parser.add_argument(
            '-1', '--once', action='store_true', default=False,
            help='Publish and multicast send for one emit period then exit; used in test case.')

    def main(self, *, args):
        """Run the connectivity check until Ctrl-C (or once, with --once)."""
        # summary_table is module-global so Talker/Listener and the
        # module-level _send/_receive helpers can update it too
        global summary_table
        summary_table = SummaryTable()
        rclpy.init()
        executor = SingleThreadedExecutor()
        pub_node = Talker(args.topic, args.emit_period)
        sub_node = Listener(args.topic)
        executor.add_node(pub_node)
        executor.add_node(sub_node)
        try:
            prev_time = time.time()
            # pub/sub thread: spins both ROS nodes in the background
            exec_thread = threading.Thread(target=executor.spin)
            exec_thread.start()
            while True:
                # print (and reset) the summary table every print_period seconds
                if (time.time() - prev_time > args.print_period):
                    summary_table.format_print_summary(args.topic, args.print_period)
                    summary_table.reset()
                    prev_time = time.time()
                # multicast threads: one short-lived daemon pair per emit period
                send_thread = threading.Thread(target=_send, kwargs={'ttl': args.ttl})
                send_thread.daemon = True
                receive_thread = threading.Thread(target=_receive)
                receive_thread.daemon = True
                receive_thread.start()
                send_thread.start()
                time.sleep(args.emit_period)
                if args.once:
                    return summary_table
        except KeyboardInterrupt:
            pass
        finally:
            # tear down the executor and both nodes on any exit path
            executor.shutdown()
            rclpy.shutdown()
            pub_node.destroy_node()
            sub_node.destroy_node()
class Talker(Node):
    """ROS 2 node that publishes a greeting on *topic* every *time_period* seconds."""

    def __init__(self, topic, time_period, *, qos=10):
        """Create the publisher and its periodic timer."""
        # hostname + pid keeps node names unique across hosts and processes
        node_name = 'ros2doctor_' + socket.gethostname() + str(os.getpid()) + '_talker'
        super().__init__(node_name)
        self._i = 0  # count of messages published so far
        self._pub = self.create_publisher(String, topic, qos)
        self._timer = self.create_timer(time_period, self.timer_callback)

    def timer_callback(self):
        """Publish one greeting tagged with this host's name."""
        msg = String()
        hostname = socket.gethostname()
        msg.data = f"hello, it's me {hostname}"
        # module-level table installed by HelloVerb.main
        summary_table.increment_pub()
        self._pub.publish(msg)
        self._i += 1
class Listener(Node):
    """ROS 2 node that subscribes to *topic* and counts messages from other hosts."""

    def __init__(self, topic, *, qos=10):
        """Create the subscription."""
        # hostname + pid keeps node names unique across hosts and processes
        node_name = 'ros2doctor_' + socket.gethostname() + str(os.getpid()) + '_listener'
        super().__init__(node_name)
        self._sub = self.create_subscription(
            String,
            topic,
            self.sub_callback,
            qos)

    def sub_callback(self, msg):
        """Count a received greeting, ignoring our own messages."""
        # the publishing hostname is the last whitespace-separated token
        msg_data = msg.data.split()
        pub_hostname = msg_data[-1]
        if pub_hostname != socket.gethostname():
            summary_table.increment_sub(pub_hostname)
def _send(*, group=DEFAULT_GROUP, port=DEFAULT_PORT, ttl=None):
    """Multicast send one message."""
    hostname = socket.gethostname()
    # socket as a context manager: closed on exit or exception, exactly as
    # the previous try/finally did
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) as sock:
        if ttl is not None:
            sock.setsockopt(
                socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, struct.pack('b', ttl))
        sock.sendto(f"hello, it's me {hostname}".encode('utf-8'), (group, port))
        summary_table.increment_send()
def _receive(*, group=DEFAULT_GROUP, port=DEFAULT_PORT, timeout=None):
    """Multicast receive one datagram and count its sender (if not ourselves)."""
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
    try:
        # allow multiple processes on one host to share the multicast port
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        try:
            s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
        except AttributeError:
            # not available on Windows
            pass
        # bind before joining the multicast group
        s.bind(('', port))
        s.settimeout(timeout)
        mreq = struct.pack('4sl', socket.inet_aton(group), socket.INADDR_ANY)
        s.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
        try:
            data, _ = s.recvfrom(4096)
            data = data.decode('utf-8')
            # the sending hostname is the last whitespace-separated token
            sender_hostname = data.split()[-1]
            if sender_hostname != socket.gethostname():
                summary_table.increment_receive(sender_hostname)
        finally:
            # always leave the multicast group, even on timeout/error
            s.setsockopt(socket.IPPROTO_IP, socket.IP_DROP_MEMBERSHIP, mreq)
    finally:
        s.close()
class SummaryTable():
    """Thread-safe counters for msgs published/sent and subscribed/received."""

    def __init__(self):
        """Initialize empty summary table."""
        self.lock = threading.Lock()
        self._pub = 0       # msgs published on the ROS topic
        self._send = 0      # msgs sent via UDP multicast
        self._sub = {}      # hostname -> msgs subscribed from that host
        self._receive = {}  # hostname -> multicast msgs received from it

    def reset(self):
        """Reset summary table to empty; called after each summary print."""
        with self.lock:
            self._pub = 0
            self._send = 0
            self._sub = {}
            self._receive = {}

    def increment_pub(self):
        """Increment published msg count."""
        with self.lock:
            self._pub += 1

    def increment_sub(self, hostname):
        """Increment subscribed msg count for *hostname*."""
        with self.lock:
            # dict.get idiom replaces the if/else membership dance
            self._sub[hostname] = self._sub.get(hostname, 0) + 1

    def increment_send(self):
        """Increment multicast-sent msg count."""
        with self.lock:
            self._send += 1

    def increment_receive(self, hostname):
        """Increment multicast-received msg count for *hostname*."""
        with self.lock:
            self._receive[hostname] = self._receive.get(hostname, 0) + 1

    def format_print_summary(self, topic, print_period, *, group=None, port=None):
        """Print the collected counts in a table format.

        ``group``/``port`` default to the module-level DEFAULT_GROUP /
        DEFAULT_PORT. They are resolved at call time (late-bound) rather
        than captured when the class is defined, which keeps the class
        importable and testable independent of the module constants.
        """
        if group is None:
            group = DEFAULT_GROUP
        if port is None:
            port = DEFAULT_PORT

        def _print_counts(table):
            # per-host breakdown under a column header
            print('{:<15} {:<20} {:<10}'.format('', 'Hostname', f'Msg Count /{print_period}s'))
            for name, count in table.items():
                print('{:<15} {:<20} {:<10}'.format('', name, count))

        print('MULTIMACHINE COMMUNICATION SUMMARY')
        print(f'Topic: {topic}, Published Msg Count: {self._pub}')
        print('Subscribed from:')
        _print_counts(self._sub)
        print(
            f'Multicast Group/Port: {group}/{port}, '
            f'Sent Msg Count: {self._send}')
        print('Received from:')
        _print_counts(self._receive)
        print('-'*60)
| [
"noreply@github.com"
] | noreply@github.com |
dfda12ed6eaa7357e9e486024948bcda5cbc5a23 | e2a81e1e03a1deeaad9d9032e076a6e8327f65f7 | /JSS-django/api/serializers.py | b91862e583884414f7a0c4643eba36c9d2be3563 | [] | no_license | sirpqp/SubjectService | 529960a6c3172386e3d2d8d1e942dff4affcb72a | a47916a61c9e23dff55617acb878859a07b18210 | refs/heads/main | 2023-07-06T15:35:46.968783 | 2021-08-24T01:24:55 | 2021-08-24T01:24:55 | 396,760,009 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,090 | py | from .models import *
from rest_framework import serializers
from robot.models import Dialog
from django.db.models import ObjectDoesNotExist
class UserSerializer(serializers.ModelSerializer):
    """Serialize every field of the User model."""

    class Meta:
        model = User
        fields = '__all__'
# class RoleSerializer(serializers.ModelSerializer):
# class Meta:
# model = Role
# fields = '__all__'
class RegionSerializer(serializers.ModelSerializer):
    """Serialize every field of the Region model."""

    class Meta:
        model = Region
        fields = '__all__'
class SellerRegionSerializer(serializers.ModelSerializer):
    """Serialize every field of the SellerRegion model."""

    class Meta:
        model = SellerRegion
        fields = '__all__'
class OrganSerializer(serializers.ModelSerializer):
    """Serialize Organ plus read-only seller/region info from SellerRegion."""

    # Extra read-only fields derived from the related SellerRegion row.
    seller2 = serializers.SerializerMethodField()
    region2 = serializers.SerializerMethodField()

    @staticmethod
    def get_seller2(obj):
        # id/nickname of the seller behind this organ's SellerRegion.
        return {'id': obj.seller.seller.id, 'name': obj.seller.seller.nickname}

    @staticmethod
    def get_region2(obj):
        # Human-readable region name.
        return obj.seller.region.name

    class Meta:
        model = Organ
        fields = '__all__'
class GroupSerializer(serializers.ModelSerializer):
    """Serialize Group plus the owning organ's name."""

    organ2 = serializers.SerializerMethodField()

    @staticmethod
    def get_organ2(obj):
        # Read-only display name of the parent organ.
        return obj.organ.name

    class Meta:
        model = Group
        fields = '__all__'
class CustomerSerializer(serializers.ModelSerializer):
    """Serialize Customer plus a nested summary of its organ and group."""

    ex = serializers.SerializerMethodField()

    @staticmethod
    def get_ex(obj):
        # Read-only summary of the related Organ and Group rows.
        return {
            'organ': {
                'id': obj.organ.id,
                'name': obj.organ.name
            },
            'group': {
                'id': obj.group.id,
                'name': obj.group.name,
                'type': obj.group.type
            }
        }

    class Meta:
        model = Customer
        fields = '__all__'
class RequestSerializer(serializers.ModelSerializer):
    """Serialize Request with readable names for its related rows."""

    customer2 = serializers.SerializerMethodField()
    group2 = serializers.SerializerMethodField()
    registrar2 = serializers.SerializerMethodField()

    @staticmethod
    def get_customer2(obj):
        # Display name of the requesting customer.
        return obj.customer.nickname

    @staticmethod
    def get_group2(obj):
        # Display name of the target group.
        return obj.group.name

    @staticmethod
    def get_registrar2(obj):
        # Display name of the user who registered the request.
        return obj.registrar.nickname

    class Meta:
        model = Request
        fields = '__all__'
class TaskSerializer(serializers.ModelSerializer):
    """Serialize Task with a denormalized `ex` blob for the frontend."""

    ex = serializers.SerializerMethodField()

    @staticmethod
    def get_ex(obj):
        # Flatten request/customer/resource details into one read-only dict.
        return {
            'group':
                '[%s]%s' % (obj.request.group.type, obj.request.group.name),
            'gid':
                obj.request.group.gid,
            'customer':
                obj.request.customer.nickname,
            'wechat':
                obj.request.customer.wechat,
            'email':
                obj.request.customer.email,
            'registrar':
                obj.request.registrar.nickname,
            'date_registered':
                obj.request.date_registered,
            'res_id':
                obj.resource.id if obj.resource else None,
            'resource': {
                'title': obj.resource.title,
                'attachment': {
                    'name': obj.resource.attachment.name,
                    'url': obj.resource.attachment.url,
                } if obj.resource.attachment else None,
                'size': obj.resource.size,
                'restype': obj.resource.restype.id,
                'cost': obj.resource.cost,
                'uid': obj.resource.uid,
                'source': obj.resource.source,
                'lang': obj.resource.lang,
                'short': obj.resource.short,
            } if obj.resource else None,
            'replier':
                obj.replier.nickname if obj.replier else None,
            'receiver':
                obj.receiver.nickname if obj.receiver else None,
            'need_received':
                TaskSerializer.chk_received(obj.request.group.gid,
                                            obj.request.customer.wechat)
            if obj.request.group.gid and obj.request.customer.wechat else False
        }

    @staticmethod
    def chk_received(room: str, buddy: str):
        """Return True if `buddy` still has unreceived messages in `room`."""
        dialogs = Dialog.objects.filter(room=room,
                                        buddy=buddy,
                                        role='custom',
                                        is_received=False)
        # exists() issues a cheap SQL EXISTS query; len(queryset) fetched and
        # materialized every matching row just to test for emptiness.
        return dialogs.exists()

    class Meta:
        model = Task
        fields = '__all__'
class ResourceSerializer(serializers.ModelSerializer):
    """Serialize every field of the Resource model.

    An earlier draft of upload handling lived here as commented-out code;
    it was dead and has been removed.
    """

    class Meta:
        model = Resource
        fields = '__all__'
class ZoneSerializer(serializers.ModelSerializer):
    """Serialize every field of the Zone model."""

    class Meta:
        model = Zone
        fields = '__all__'
"337236126@qq.com"
] | 337236126@qq.com |
1fc637baacfe15ec87147a5c262442e79ca08358 | 34cf89f633059d0e5f2a444992c23278232aa3e3 | /stix2/test/test_hashes.py | 40ace38fa6c866669dbc64b8f40ba2db61e74ed4 | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | oasis-open/cti-python-stix2 | e578fe17e42216bf2635781511be8d0b8612fcc0 | f1c1632f3aa916cfa30b0b3625200f01c12dc5ed | refs/heads/master | 2023-08-08T17:40:47.992285 | 2023-06-06T17:51:34 | 2023-06-06T17:51:34 | 81,590,907 | 336 | 112 | BSD-3-Clause | 2023-08-17T17:41:02 | 2017-02-10T17:50:12 | Python | UTF-8 | Python | false | false | 2,887 | py | import pytest
from stix2.hashes import Hash, check_hash, infer_hash_algorithm
@pytest.mark.parametrize(
    "hash_name, expected_alg", [
        ("md5", Hash.MD5),
        ("md6", Hash.MD6),
        ("ripemd160", Hash.RIPEMD160),
        ("sha1", Hash.SHA1),
        ("sha224", Hash.SHA224),
        ("sha256", Hash.SHA256),
        ("sha384", Hash.SHA384),
        ("sha512", Hash.SHA512),
        ("sha3224", Hash.SHA3224),
        ("sha3256", Hash.SHA3256),
        ("sha3384", Hash.SHA3384),
        ("sha3512", Hash.SHA3512),
        ("ssdeep", Hash.SSDEEP),
        ("whirlpool", Hash.WHIRLPOOL),
        ("tlsh", Hash.TLSH),
        ("xxxx", None),
    ],
)
def test_hash_inference(hash_name, expected_alg):
    """infer_hash_algorithm maps names to Hash members regardless of case/dashes."""
    alg = infer_hash_algorithm(hash_name)
    assert alg == expected_alg
    # Try some other name variations
    alg = infer_hash_algorithm(hash_name[0].upper() + hash_name[1:])
    assert alg == expected_alg
    alg = infer_hash_algorithm("-"+hash_name)
    assert alg == expected_alg
@pytest.mark.parametrize(
    "hash_alg, hash_value", [
        (Hash.MD5, "f9e40b9aa5464f3dae711ca524fceb63"),
        (Hash.MD6, "f9e40b9aa5464f3dae711ca524fceb63"),
        (Hash.RIPEMD160, "8ae5d2e6b1f3a514257f2469b637454931844aeb"),
        (Hash.SHA1, "f2c7d4185880c0adcbb4a01d020a69498b16210e"),
        (Hash.SHA224, "6743ed70cc26e750ad0108b6b8ad7fc2780c550f7d78adefa04dda05"),
        (Hash.SHA256, "a2d1c2081aa932fe72307ab076b9739455bc7a21b3bed367bd9a86ae27af5a40"),
        (Hash.SHA384, "bc846457de707f97bce93cca23b5ea58c0326fd8b79ef7b523ba1d0a792f22868732e53a5dcf2f9e3b89eecca9c9b4e3"),
        (Hash.SHA512, "896e45c82f9d8ba917d4f95891c967b88304b0a67ccc59aac813ee7ab3bc700bf9ce559e283c35ddba619755f6b70bdff2a07dc9cd337576a143a2aa361d08b1"),
        (Hash.SHA3224, "37cb283bc9f6ecf0f94e92d5bd4c1e061ae00d7ed85804d18f981f53"),
        (Hash.SHA3256, "d5fc146e37d4fddaeaa57aa88390be5c9ca6bcb18ae1bf2346cbfc36d3310ea2"),
        (Hash.SHA3384, "ac97414589b2ef59a87dc5277d156b6cfc8f6b92b7c0e889d8f38a235dd9c1ba4030321beddd13f29519390ba914f70f"),
        (Hash.SHA3512, "8dc580ad3abc6305ce5ada7c5920c763720c7733c2a94d28dd5351ffbc162b6b6d21371d91d6559124159025172e19896e09889047aac4ef555cc55456e14b0a"),
        (Hash.SSDEEP, "3:AXGBicFlgVNhBGcL6wCrFQEv:AXGHsNhxLsr2C"),
        (Hash.WHIRLPOOL, "b752b6eeb497a8bebfc1be1649ca41d57fd1973bffc2261ca196b5474e0f353762f354c1d743581f61c51f4d86921360bc2e8ad35e830578b68b12e884a50894"),
        (Hash.TLSH, "6FF02BEF718027B0160B4391212923ED7F1A463D563B1549B86CF62973B197AD2731F8"),
        ("foo", "bar"),  # unrecognized hash type is accepted as-is
    ],
)
def test_hash_check(hash_alg, hash_value):
    """Well-formed hash values validate for their algorithm, case-insensitively."""
    assert check_hash(hash_alg, hash_value)
    assert check_hash(hash_alg, hash_value.upper())  # check case sensitivity
def test_hash_check_fail():
    """An obviously malformed value must fail validation for every algorithm."""
    bogus = "x" * 200
    assert all(not check_hash(alg, bogus) for alg in Hash)
| [
"chisholm@mitre.org"
] | chisholm@mitre.org |
07e944922bc77134b25da05b26ed94d7f5a4cc15 | 7fa4d48dfded9bcd5d9c113b38cd28c4b644f2eb | /Project 3: Data Warehouse/etl.py | c00815f8d3eb3da5d42060492bda15c641e225f2 | [] | no_license | MalvikaBodh/Data-Engineering | 8d322e1beba36d1c42aa6da53d67eac4ec093183 | 208de8cf576e8b28be6783270156d6a2a54a2166 | refs/heads/main | 2023-06-21T12:09:05.081108 | 2021-07-25T03:28:14 | 2021-07-25T03:28:14 | 350,126,306 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,076 | py | import configparser
import psycopg2
from sql_queries import copy_table_queries, insert_table_queries
def load_staging_tables(cur, conn):
    """Load the staging tables by COPYing the raw S3 data into Redshift.

    Each COPY statement is committed individually, as before.
    """
    for copy_sql in copy_table_queries:
        cur.execute(copy_sql)
        conn.commit()
def insert_tables(cur, conn):
    """Populate the star-schema tables from the staging tables.

    Each INSERT statement is committed individually, as before.
    """
    for insert_sql in insert_table_queries:
        cur.execute(insert_sql)
        conn.commit()
def main():
    """Connect to the cluster described in dwh.cfg, then load and transform."""
    config = configparser.ConfigParser()
    config.read('dwh.cfg')

    # Connection parameters come from the [CLUSTER] section; format() consumes
    # them positionally, so the section's declaration order must match.
    conn = psycopg2.connect("host={} dbname={} user={} password={} port={}".format(*config['CLUSTER'].values()))
    cur = conn.cursor()

    load_staging_tables(cur, conn)
    insert_tables(cur, conn)

    conn.close()


if __name__ == "__main__":
    main()
"noreply@github.com"
] | noreply@github.com |
bab1e109f5044efb3f4da3a62fb4d9a403a5c094 | dfda488f07064cf4545843aa18da00a2631b2299 | /font-caveat/setup.py | c9c4fd6bedb6f3da39f667d09b381ccda6b3bf4c | [] | no_license | castrofernandez/fonts-python | 38f8301f7ad82f16c07a3e80b97c4e485944b0ae | f8593844538a6239f06f23e304b8af0a532d28bd | refs/heads/master | 2022-11-13T12:47:21.828240 | 2020-07-02T17:42:54 | 2020-07-02T17:42:54 | 274,605,596 | 0 | 0 | null | 2020-06-24T07:36:09 | 2020-06-24T07:36:08 | null | UTF-8 | Python | false | false | 2,506 | py | #!/usr/bin/env python
"""
MIT License
Copyright (c) 2020 Juan Castro Fernández
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# Prefer setuptools; fall back to distutils on minimal installs.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# Trove classifiers describing platform, license and supported Pythons.
classifiers = ['Development Status :: 5 - Production/Stable',
               'Operating System :: POSIX :: Linux',
               'License :: OSI Approved :: SIL Open Font License 1.1 (OFL-1.1)',
               'Intended Audience :: Developers',
               'Programming Language :: Python :: 2.6',
               'Programming Language :: Python :: 2.7',
               'Programming Language :: Python :: 3',
               'Topic :: Software Development',
               'Topic :: Text Processing :: Fonts']

setup(
    name             = 'font-caveat',
    version          = '0.0.1',
    author           = 'Juan Castro Fernández',
    author_email     = 'hola@juancastro.es',
    description      = 'Caveat font',
    long_description = open('README.rst').read() + '\n' + open('CHANGELOG.txt').read(),
    license          = 'SIL OFL 1.1',
    # Was 'Kaushan Script Font' — a copy-paste leftover from another font
    # package; this package ships Caveat.
    keywords         = 'Caveat Font',
    url              = 'https://github.com/castrofernandez/fonts-python',
    classifiers      = classifiers,
    py_modules       = [],
    packages         = ['font_caveat'],
    package_data     = {'font_caveat': ['font_caveat/files']},
    entry_points     = {
        'fonts_ttf': [
            'font-caveat = font_caveat:font_files'
        ]
    },
    zip_safe         = False,
    include_package_data = True
)
| [
"castrofernandez@gmail.com"
] | castrofernandez@gmail.com |
dd167235a75d178c486cf5e338db8bdff5431bd2 | 12a12cd19b25352b57aa3729189ebfee8574c62a | /testse2e/common.py | 58e6d940a6e0ecef3d93c5966d4c576dc4c09065 | [] | no_license | Patrik-Stas/InstabotPatrik | b40fbff68f149bee7bf76e52fa5b71679b234e7c | 69d015c690186cbd8aec844425741a5b5252c279 | refs/heads/master | 2020-03-17T22:54:08.724835 | 2018-04-11T06:35:13 | 2018-04-11T06:37:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | from testsUnit.context import instabotpatrik
import os
def get_path_to_file_in_directory_of_this_file(file_name):
    """Return an absolute path to `file_name` located beside this module."""
    module_dir = os.path.dirname(__file__)
    anchored_dir = os.path.realpath(os.path.join(os.getcwd(), module_dir))
    return os.path.join(anchored_dir, file_name)
def get_config():
    # Build a Config that points at the e2e.ini sitting next to this module.
    return instabotpatrik.config.Config(config_path=get_path_to_file_in_directory_of_this_file("e2e.ini"))
| [
"patrik.stas@concur.com"
] | patrik.stas@concur.com |
66f7a5c960b0854fe94b6ba044edbb41cf14e1de | 15a28c177721b62aa9976a2d61496b5ddc9c91a9 | /setup.py | 85cd2f6cd9f31448184929216e764e16a4804ec0 | [
"MIT"
] | permissive | haywse/feems | 404e4cf36da4acee0f425d985070977ac33ffd35 | 4fb3fa84470a2b87021f929ddc36703e7a2e2c4a | refs/heads/main | 2023-04-18T10:01:57.137826 | 2021-04-28T03:12:19 | 2021-04-28T03:12:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,226 | py | #!/usr/bin/env python
from setuptools import setup
version = "1.0.0"

# Runtime dependencies, one requirement per line of requirements.txt.
required = open("requirements.txt").read().split("\n")

# README.md doubles as the PyPI long description.
with open("README.md", "r") as fh:
    long_description = fh.read()

setup(
    name="feems",
    version=version,
    description="Fast Estimation of Effective Migration Surfaces (feems)",
    long_description=long_description,
    long_description_content_type="text/markdown",
    author="[jhmarcus, haywse]",
    author_email="[jhmarcus@uchicago.edu, haywse@gmail.com]",
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    url="https://github.com/jhmarcus/feems",
    packages=["feems"],
    install_requires=required,
    include_package_data=True,
    # Bundled example/grid data shipped inside the wheel.
    package_data={
        "": [
            "data/grid_250.shp",
            "data/grid_250.shx",
            "data/grid_100.shp",
            "data/grid_100.shx",
            "data/wolvesadmix.bed",
            "data/wolvesadmix.coord",
            "data/wolvesadmix.fam",
            "data/wolvesadmix.bim",
            "data/wolvesadmix.outer",
            "data/wolvesadmix.diffs",
        ]
    },
    license="MIT",
)
| [
"jhmarcus@uchicago.edu"
] | jhmarcus@uchicago.edu |
318e6a0c5427da811a19e24e577d6e94e37eee94 | 574189454870b5469804aab2866ffbffc3669785 | /erikagnvall-python3/day05.py | f5ea40791948d4fd63a08643ec63e226b014e7b8 | [
"Apache-2.0"
] | permissive | jakeru/advent_of_code_2019 | 63fe51301af2c21e2a0a8bc5c3abcb59b0756d5e | d906d55095707942a051788b1d41ba12e1af66cd | refs/heads/master | 2020-09-24T08:27:55.138122 | 2019-12-11T21:35:03 | 2019-12-11T21:36:20 | 225,713,610 | 0 | 0 | Apache-2.0 | 2019-12-03T20:53:32 | 2019-12-03T20:53:31 | null | UTF-8 | Python | false | false | 2,627 | py | import os.path
from intcode import run
def _read_input():
    """Read the comma-separated intcode program from this script's .txt twin."""
    input_path = os.path.basename(__file__).replace('.py', '.txt')
    with open(input_path) as handle:
        return tuple(int(token) for token in handle.readline().split(','))
def part1(memory):
    """Run the program with input 1 and return the final diagnostic output."""
    outputs = run(memory, inputs=[1])[3]
    return outputs[-1]
def part2(memory):
    """Run the program with input 5 and return the final diagnostic output."""
    outputs = run(memory, inputs=[5])[3]
    return outputs[-1]
# Script driver: solve both parts against the puzzle input.
# Each part gets a fresh mutable copy since run() modifies memory in place.
program = _read_input()
print(part1(list(program)))
print(part2(list(program)))
############
# Tests
def test_run_example():
    """I/O opcodes: input 42 is stored at address 0 and echoed back out."""
    mem, ip, inputs, outputs = run([3, 0, 4, 0, 99], inputs=[42])
    assert mem == [42, 0, 4, 0, 99]
    assert ip == 4
    assert not inputs
    assert outputs == [42]
    # Parameter-mode examples from the day 5 puzzle text.
    memory = [1002, 4, 3, 4, 33]
    assert run(memory) == ([1002, 4, 3, 4, 99], 4, [], [])
    memory = [1101, 100, -1, 4, 0]
    assert run(memory) == ([1101, 100, -1, 4, 99], 4, [], [])
def test_opcode8():
    """Opcode 8 outputs 1 when input == 8, else 0 (position and immediate modes)."""
    memory = [3, 9, 8, 9, 10, 9, 4, 9, 99, -1, 8]
    _, _, _, outputs = run(memory, inputs=[8])
    assert outputs == [1]
    memory = [3, 3, 1108, -1, 8, 3, 4, 3, 99]
    _, _, _, outputs = run(memory, inputs=[8])
    assert outputs == [1]
    memory = [3, 9, 8, 9, 10, 9, 4, 9, 99, -1, 8]
    _, _, _, outputs = run(memory, inputs=[4])
    assert outputs == [0]
    # The original repeated the position-mode program here (copy-paste), so
    # the immediate-mode program was never tested with a non-equal input;
    # use the 1108 program as in test_opcode7.
    memory = [3, 3, 1108, -1, 8, 3, 4, 3, 99]
    _, _, _, outputs = run(memory, inputs=[4])
    assert outputs == [0]
def test_opcode7():
    """Opcode 7 outputs 1 when input < 8, else 0 (position and immediate modes)."""
    memory = [3, 9, 7, 9, 10, 9, 4, 9, 99, -1, 8]
    _, _, _, outputs = run(memory, inputs=[8])
    assert outputs == [0]
    memory = [3, 3, 1107, -1, 8, 3, 4, 3, 99]
    _, _, _, outputs = run(memory, inputs=[8])
    assert outputs == [0]
    memory = [3, 9, 7, 9, 10, 9, 4, 9, 99, -1, 8]
    _, _, _, outputs = run(memory, inputs=[4])
    assert outputs == [1]
    memory = [3, 3, 1107, -1, 8, 3, 4, 3, 99]
    _, _, _, outputs = run(memory, inputs=[4])
    assert outputs == [1]
def test_jumps():
    """Jump opcodes: programs output 0 for input 0 and 1 for non-zero input."""
    memory = [3, 12, 6, 12, 15, 1, 13, 14, 13, 4, 13, 99, -1, 0, 1, 9]
    _, _, _, outputs = run(memory, inputs=[0])
    assert outputs == [0]
    memory = [3, 3, 1105, -1, 9, 1101, 0, 0, 12, 4, 12, 99, 1]
    _, _, _, outputs = run(memory, inputs=[0])
    assert outputs == [0]
    memory = [3, 12, 6, 12, 15, 1, 13, 14, 13, 4, 13, 99, -1, 0, 1, 9]
    _, _, _, outputs = run(memory, inputs=[123])
    assert outputs == [1]
    memory = [3, 3, 1105, -1, 9, 1101, 0, 0, 12, 4, 12, 99, 1]
    _, _, _, outputs = run(memory, inputs=[123])
    assert outputs == [1]
def test_part1():
    # Regression pin: known-good answer for this puzzle input.
    assert part1(list(program)) == 14155342


def test_part2():
    # Regression pin: known-good answer for this puzzle input.
    assert part2(list(program)) == 8684145
"erik.jansson@cognibotics.com"
] | erik.jansson@cognibotics.com |
7d83082d756b0f82bbbe4ad530fa037045e22460 | cd88c0c19c1be2b1cc6b9331c97f4ee67b5d0640 | /event/migrations/0004_auto_20160919_1107.py | a2a359d83af59dbe7065f67a1e1845e616605a63 | [] | no_license | al-alamin/msnb | eed3cb2e7252ad87c7866072b8c1c73ad626bdf3 | 451b8dfc3729424bb1d9ee2b1b592d671537171c | refs/heads/master | 2022-05-10T05:54:46.149661 | 2022-05-03T20:54:18 | 2022-05-03T20:54:18 | 68,234,626 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 450 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Repoint Event.presenter at the project's configured user model."""

    dependencies = [
        ('event', '0003_auto_20160820_1347'),
    ]

    operations = [
        migrations.AlterField(
            model_name='event',
            name='presenter',
            field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"tareqbuet@gmail.com"
] | tareqbuet@gmail.com |
e5e539147b37966eaf9509f87efb5d4f05343e42 | 860518269a26868520a441c0f82c6a87a9fa78e9 | /find_and_replace_pattern.py | 872a3e58f8db7461f97f5c545e55b0cedbb21e74 | [] | no_license | aliabbasrizvi/leetcode | 99e41b924a89fe01302e1c3d623927b792005e46 | 4c33a022984bb6e711d0def1bb9c47a5c22b3b29 | refs/heads/master | 2021-06-30T10:01:29.218059 | 2020-12-21T01:06:32 | 2020-12-21T01:06:32 | 49,443,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 813 | py | import collections
class Solution(object):
    def build_pattern(self, word_for_pattern):
        """Return the word's canonical pattern.

        The pattern is the list of index groups, one group per distinct
        character in order of first occurrence, e.g. "abb" -> [[0], [1, 2]].
        """
        char_to_position = collections.OrderedDict()
        for idx, char in enumerate(word_for_pattern):
            char_to_position.setdefault(char, []).append(idx)
        # Must return a list: in Python 3, dict values *views* never compare
        # equal to each other (== falls back to identity), which made every
        # pattern comparison below fail and findAndReplacePattern return [].
        return list(char_to_position.values())

    def findAndReplacePattern(self, words, pattern):
        """
        :type words: List[str]
        :type pattern: str
        :rtype: List[str]
        """
        pattern_to_compare = self.build_pattern(pattern)
        similar_words = []
        for word in words:
            if len(word) != len(pattern):
                continue
            if self.build_pattern(word) == pattern_to_compare:
                similar_words.append(word)
        return similar_words
| [
"ali@optimizely.com"
] | ali@optimizely.com |
4b17e3f76ea3f25cd1e3d6b4d7551e54d5734be4 | 33ee13e169f5405f9f808f7746178dfd56963e42 | /main.py | 058048b27efe66b869b56089c21d76311327ea2b | [] | no_license | 23akei/SabeyaLogger | 472bc8e65269078302b1fc0f04888af4a63db67a | ed48464ed7285e0dac5f61385638df3336a02350 | refs/heads/master | 2023-05-18T01:41:09.838749 | 2021-05-29T13:56:02 | 2021-05-29T13:56:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,436 | py | from flask import Flask, render_template, escape, request, send_from_directory, url_for
import sqlite3
app = Flask(__name__)


@app.route("/")
def top():
    # Landing page with the ID-entry form.
    return render_template("top.html")
@app.route("/hoge", methods=["POST"])
def record():
    """Toggle the in/out status of the member whose ID was POSTed.

    Accepts a 9-digit member id, or a 13-digit card number whose digits
    4-11 encode the member id; appends a Log row with the opposite of the
    member's latest status and renders a confirmation page.

    Fixes over the original: parameters are passed as tuples (``(id_num,)``,
    not ``(id_num)``), the broken ``select name from where id=? Member``
    SQL is corrected, ``list(cur.execute(...), (id_num))`` (two-argument
    ``list``) is removed, rows are indexed as tuples rather than dicts,
    the insert is committed, and the connection is always closed.
    """
    id_input = str(request.form.get("id", ""))
    if len(id_input) == 9:
        id_num = int(id_input)
    elif len(id_input) == 13:
        # Card numbers embed the member id in digits 4-11.
        id_num = int(id_input[3:11])
    else:
        return render_template("error.html")

    con = sqlite3.connect("./data.db")
    try:
        cur = con.cursor()
        member = cur.execute(
            "select name from Member where id=?;", (id_num,)).fetchall()
        if not member:
            # Unknown member id.
            return render_template("error.html")

        rows = cur.execute(
            "select status from Log where person=? order by date_time;",
            (id_num,)).fetchall()
        if not rows:
            # First scan ever recorded for this member: they are coming in.
            status_register = "in"
        elif rows[-1][0] == "in":
            status_register = "out"
        elif rows[-1][0] == "out":
            status_register = "in"
        else:
            # Corrupt status value in the log.
            return render_template("error.html")

        cur.execute(
            "insert into Log(status, person) values(?, ?);",
            (status_register, id_num))
        con.commit()  # the original never committed, losing every insert
    finally:
        con.close()
    return render_template("success.html", status=status_register,
                           name=member[0][0])


if __name__ == "__main__":
    app.run()
"tom0pr01fr.olem.001@gmail.com"
] | tom0pr01fr.olem.001@gmail.com |
d617aaac35275cf070b7f5bd47f28582080b01ae | fb1e852da0a026fb59c8cb24aeb40e62005501f1 | /kosmos-2/fairseq/fairseq/models/speech_to_speech/__init__.py | d34883552596496799514422e5a895376d02f735 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] | permissive | microsoft/unilm | 134aa44867c5ed36222220d3f4fd9616d02db573 | b60c741f746877293bb85eed6806736fc8fa0ffd | refs/heads/master | 2023-08-31T04:09:05.779071 | 2023-08-29T14:07:57 | 2023-08-29T14:07:57 | 198,350,484 | 15,313 | 2,192 | MIT | 2023-08-19T11:33:20 | 2019-07-23T04:15:28 | Python | UTF-8 | Python | false | false | 248 | py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .modules import * # noqa
from .s2s_transformer import * # noqa
| [
"1083127130@qq.com"
] | 1083127130@qq.com |
2115a212a8cb5407557c8168b509d0dfcce1be84 | 4378bc1f97baf39dfc64f6b189f540bcebbf8772 | /generate-test-data.py | 9c36ccb065d57b70a5faf090d29d0187b2b97963 | [] | no_license | liulangmeng/nmf-spark | fbdd04291605860f287e3b96513880c231ef4da6 | a38e49948ed7faf15e8d05c7677677bdb172feb1 | refs/heads/master | 2021-01-13T03:47:38.438368 | 2016-12-23T03:09:41 | 2016-12-23T03:09:41 | 77,192,841 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | from h5py import File
import numpy as np
# Build a synthetic low-rank matrix W.dot(H) (~3 GB) and store it in HDF5.
m = int(2e6)
n = 200
k = 100  # NOTE(review): unused — presumably a leftover parameter; confirm
r = 20
W = np.random.random((m, r))
H = np.zeros((r, n))
H[:, :r] = np.eye(r)
H[:, r:] = np.random.random((r, n - r))
# Shuffle some columns of H.  The copy is essential: basic slicing returns a
# *view*, so the original `temp = H[:, i]` aliased the column being
# overwritten and the intended old value was lost.
for i in np.arange(2, 20, 1):
    temp = H[:, i].copy()
    H[:, i] = H[:, 2 * i]
    H[:, 10 * i] = temp
fout = File("testdata.h5", "w")
fout.create_dataset("mat", data=W.dot(H))
fout.close()  # the original `fout.close(t)` raised NameError (`t` undefined)
| [
"mxyliulangmeng@126.com"
] | mxyliulangmeng@126.com |
404e972b7f3afe1279c55a210a28a4005c5a754c | 797a4bbcee31943d9cad71537da88edebf63c7ad | /main.py | 404163e74fe51ca71fcb9e675b94bd237e49182a | [] | no_license | Fratealin/exchange_rate_retriever | 9bc21e86c4586ac4de6b10903902c17807bb2488 | 37fca8a955a751880d4f8fa9b5aa70752a778ffd | refs/heads/master | 2022-05-29T13:25:12.269692 | 2020-05-02T16:33:12 | 2020-05-02T16:33:12 | 260,727,783 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,194 | py | import get_exchange_rate_data
import datetime
import file_writer
import email_manager
import get_trip_destination
import time
# get email update every 24 hrs
def get_daily_exchange_rate():
    """Fetch today's rate, log it to CSV and email a summary, daily for 5 days."""
    currencies = get_exchange_rate_data.get_currencies_list()
    destination_currency = get_trip_destination.get_trip_destination(currencies)
    for day in range(5):
        today = datetime.datetime.now()
        # No market data on weekends: wait a day, then retry.
        # NOTE(review): the skipped weekend day still consumes one of the five
        # loop iterations — confirm that is intended.
        if today.strftime("%A") in ["Saturday", "Sunday"]:
            print("As there is no exchange rate data on weekends, we will provide it on Monday.")
            oneDay = 60*60*24
            time.sleep(oneDay)
            continue
        # todaysRate is (currency_code, rate) — inferred from usage below.
        todaysRate = get_exchange_rate_data.get_exchange_rate_data(destination_currency)
        # One CSV log per currency, e.g. "EUR_exchange_rates.txt".
        file_name = todaysRate[0] + "_exchange_rates.txt"
        file_writer.create_csv_file(file_name)
        date = today.strftime("%A %B %d %Y, %H:%M")
        new_csv_data = [file_name, date, todaysRate[1]]
        file_writer.write_csv(new_csv_data)
        previousRates = file_writer.read_csv(file_name)
        email_manager.create_email(previousRates, date, todaysRate)
        # Sleep 24 hours before the next update.
        oneDay = 60*60*24
        time.sleep(oneDay)

get_daily_exchange_rate()
"ali_sensei2013@gmail.com"
] | ali_sensei2013@gmail.com |
03ece2f4bb8c3b14f00121fd73d1286b8e0e795d | be3f53f6d61dd21b79a3ac1e079aa0149069a912 | /lambdas_and_built_in_functions/filter.py | 07329a3270c835004e5bf2ccc31414f43a6c4716 | [] | no_license | ksompura/python_training | 769e76fc73b2d40106bd4db2f52d1257ef1f01c3 | 8675bb5e415ba56eda85c1efeef969c8bd74f848 | refs/heads/main | 2023-03-29T14:52:50.801711 | 2021-03-30T05:00:17 | 2021-03-30T05:00:17 | 345,509,805 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 746 | py | # filter is like map, but it is used to filter out a thing based on some conditions
# filter uses True and False to determine what is filtered out
users = [
{"username":"sam","tweets":["Pizza is good","Yummmmm"]},
{"username":"kale","tweets":["Kale chips, yeaaaa"]},
{"username":"jeff","tweets":[]},
{"username":"nico","tweets":[]},
{"username":"angel","tweets":["CATS"]},
{"username":"michael","tweets":[]}
]
# inactive_users = list(filter(lambda n: not n["tweets"] == 0,users))
## MOST OFTEN IN PYTHON PEOPLE USE LIST COMPREHENSIONS INSTEAD OF THIS, this is more of what is used in some other languages
# print(inactive_users)
usernames = list(map(lambda u: u["username"].upper(),
filter(lambda n: not n["tweets"],users)))
print(usernames) | [
"keshavsomp@gmail.com"
] | keshavsomp@gmail.com |
b01dce14fc9f5589006b2b7d410ad1ef9b9fee58 | 1ffdfe3588d39fd91c8f41a60c475c4e1cbb0126 | /pro07/person/urls.py | 937b70ac8d50da75dd2e2757ef73a9d5c0fee5e6 | [] | no_license | xddongx/study-Django | b52279d5b44e3cde7b1694472ff42f92e9b463c8 | c0ed149fddf9b1c39ab778103456ea2808188dd9 | refs/heads/main | 2023-04-15T03:31:42.657210 | 2021-04-21T08:48:28 | 2021-04-21T08:48:28 | 307,752,639 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 940 | py | """config URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path, include
from rest_framework import routers
from .views import *
# The router auto-generates list/detail routes for the Person viewset.
router = routers.DefaultRouter()
router.register('persons', PersonViewSet)  # plain string: the f-prefix had no placeholders

urlpatterns = [
    path('', include(router.urls)),
    path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),
]
| [
"31644115+xddongx@users.noreply.github.com"
] | 31644115+xddongx@users.noreply.github.com |
ef835c8ab8f9b1d665e298b1da78b17ab7380731 | 135d2c02b3ad706573bdfafa75ebc14bd170ef97 | /firedex-static/sdn-controller/sdn_controller.py | d19c338814f705b0ff1ecde6b5649d74806fa4f2 | [] | no_license | boulouk/firedex | 4afc6467bd83e096051d941699e59f1be806a46c | 187012986f4adf85d017e84a64db7c9bb1f447b0 | refs/heads/master | 2022-06-06T01:56:38.464322 | 2019-11-24T09:44:03 | 2019-11-24T09:44:03 | 138,659,150 | 2 | 1 | null | 2022-05-20T20:55:18 | 2018-06-25T23:09:54 | Python | UTF-8 | Python | false | false | 376 | py |
from ryu.cmd import manager
# Ryu application modules to load at startup.
applications = ["topology_application", "flow_application"]


def run_controller(applications):
    """Launch the Ryu manager with the given apps plus topology/debug flags."""
    arguments = list(applications)
    arguments += ["--observe-links", "--enable-debugger"]
    manager.main(args=arguments)


if __name__ == '__main__':
    run_controller(applications=applications)
| [
"lucascalz8@gmail.com"
] | lucascalz8@gmail.com |
c2155b38173125504ea953047536dfada8b480da | e6fad75ea5843b25fef33c9eb1048a12a52c030b | /first.py | 8563bf17b9ef8524107ac05b0f5ff9f604b80edc | [] | no_license | shirsenh/Playing-With-Matplotlib | 40d1a66097a4fde8d38b2cfc8a2dd80c093c092c | 340cfab4f4848cd7150e44e6d5e0275e2fca243d | refs/heads/master | 2021-06-25T20:53:16.843597 | 2017-08-24T01:34:38 | 2017-08-24T01:34:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 272 | py | from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
import numpy as np
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
X, Y, Z = [1,2,3,4,5,6,7,8,9,10],[5,6,2,3,13,4,1,2,4,8],[2,3,3,3,5,7,9,11,9,10]
ax.plot_wireframe(X, Y, Z)
plt.show() | [
"haldershirsendu1@gmail.com"
] | haldershirsendu1@gmail.com |
5b4139f4d254caa962680dbd2803d3a308ba364b | 4569d707a4942d3451f3bbcfebaa8011cc5a128d | /virtualticketpermissionsplugin/trunk/virtualticketpermissions/policy.py | a5555ce1fb694ee48ff392de846dd5835f425203 | [
"BSD-3-Clause"
] | permissive | woochica/trachacks | 28749b924c897747faa411876a3739edaed4cff4 | 4fcd4aeba81d734654f5d9ec524218b91d54a0e1 | refs/heads/master | 2021-05-30T02:27:50.209657 | 2013-05-24T17:31:23 | 2013-05-24T17:31:23 | 13,418,837 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,412 | py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2008 Noah Kantrowitz
# Copyright (C) 2008 Norman Rasmussen
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
from trac.core import *
from trac.perm import IPermissionRequestor, IPermissionGroupProvider, IPermissionPolicy, PermissionSystem
from trac.ticket.model import Ticket
from trac.config import IntOption, ListOption
from trac.util.compat import set
class VirtualTicketPermissionsPolicy(Component):
    """Central tasks for the VirtualTicketPermissions plugin."""
    implements(IPermissionRequestor, IPermissionPolicy)

    # Components that can expand a username into permission groups.
    group_providers = ExtensionPoint(IPermissionGroupProvider)

    blacklist = ListOption('virtualticketpermissions', 'group_blacklist', default='anonymous, authenticated',
                           doc='Groups that do not affect the common membership check.')

    # Meta-permissions this policy grants dynamically per ticket; only these
    # actions are ever evaluated by check_permission below.
    virtual_permissions = set([
        'TICKET_IS_REPORTER',
        'TICKET_IS_OWNER',
        'TICKET_IS_CC',
        'TICKET_IS_REPORTER_GROUP',
        'TICKET_IS_OWNER_GROUP',
        'TICKET_IS_CC_GROUP',
    ])
    # IPermissionPolicy(Interface)
    def check_permission(self, action, username, resource, perm):
        # Only virtual ticket actions are decided here; everything else is
        # left to other policies (return None = abstain).
        if username == 'anonymous' or \
           not action in self.virtual_permissions:
            # In these two cases, checking makes no sense
            return None

        if 'TRAC_ADMIN' in perm:
            # In this case, checking makes no sense
            return True

        # Walk up the resource hierarchy to find the enclosing ticket.
        while resource:
            if resource.realm == 'ticket':
                break
            resource = resource.parent

        if resource and resource.realm == 'ticket' and resource.id is not None:
            return self.check_ticket_permissions(action, perm, resource)

        return None
# IPermissionRequestor methods
def get_permission_actions(self):
actions = ['TICKET_IS_REPORTER', 'TICKET_IS_OWNER', 'TICKET_IS_CC']
group_actions = ['TICKET_IS_REPORTER_GROUP', 'TICKET_IS_OWNER_GROUP', 'TICKET_IS_CC_GROUP']
all_actions = actions + [(a+'_GROUP', [a]) for a in actions]
return all_actions + [('TICKET_IS_SELF', actions), ('TICKET_IS_GROUP', group_actions)]
# Public methods
def check_ticket_permissions(self, action, perm, res):
"""Return if this req is generating permissions for the given ticket ID."""
try:
tkt = Ticket(self.env, res.id)
except TracError:
return None # Ticket doesn't exist
if action == 'TICKET_IS_SELF':
return tkt['reporter'] == perm.username or \
perm.username == tkt['owner'] or \
perm.username in [x.strip() for x in tkt['cc'].split(',')]
if action == 'TICKET_IS_REPORTER':
return tkt['reporter'] == perm.username
if action == 'TICKET_IS_CC':
return perm.username in [x.strip() for x in tkt['cc'].split(',')]
if action == 'TICKET_IS_OWNER':
return perm.username == tkt['owner']
if action == 'TICKET_IS_GROUP':
result = self._check_group(perm.username, tkt['reporter']) or \
self._check_group(perm.username, tkt['owner'])
for user in tkt['cc'].split(','):
#self.log.debug('Private: CC check: %s, %s', req.authname, user.strip())
if self._check_group(perm.username, user.strip()):
result = True
return result
if action == 'TICKET_IS_REPORTER_GROUP':
return self._check_group(perm.username, tkt['reporter'])
if action == 'TICKET_IS_OWNER_GROUP':
return self._check_group(perm.username, tkt['owner'])
if action == 'TICKET_IS_CC_GROUP':
result = False
for user in tkt['cc'].split(','):
#self.log.debug('Private: CC check: %s, %s', req.authname, user.strip())
if self._check_group(perm.username, user.strip()):
result = True
return result
# We should never get here
return None
# Internal methods
def _check_group(self, user1, user2):
"""Check if user1 and user2 share a common group."""
user1_groups = self._get_groups(user1)
user2_groups = self._get_groups(user2)
both = user1_groups.intersection(user2_groups)
both -= set(self.blacklist)
#self.log.debug('PrivateTicket: %s&%s = (%s)&(%s) = (%s)', user1, user2, ','.join(user1_groups), ','.join(user2_groups), ','.join(both))
return bool(both)
def _get_groups(self, user):
# Get initial subjects
groups = set([user])
for provider in self.group_providers:
for group in provider.get_permission_groups(user):
groups.add(group)
perms = PermissionSystem(self.env).get_all_permissions()
repeat = True
while repeat:
repeat = False
for subject, action in perms:
if subject in groups and action.islower() and action not in groups:
groups.add(action)
repeat = True
return groups | [
"rjollos@7322e99d-02ea-0310-aa39-e9a107903beb"
] | rjollos@7322e99d-02ea-0310-aa39-e9a107903beb |
771a2bf6caaa7ad3e08d7d92a9dd0f6c8d49b9a8 | f74119a55ff5d4e89f5b7fb7da24a23828e1c203 | /test_labeler.py | 0ee0907d1d02f413876674b0d058a669f89f461d | [
"MIT"
] | permissive | mdlaskey/yolo_labeler | 3f15dd229f6a5e01e508c5141345ff9363717b94 | 93463ee54ee8773e7c2ce2368a95c4c1102e712c | refs/heads/master | 2021-08-16T00:50:10.238386 | 2017-09-20T22:49:40 | 2017-09-20T22:49:40 | 96,812,011 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,526 | py | import os,sys
import xml.etree.ElementTree as ET
import numpy as np
import cv2
import cPickle
import copy
import glob
import yolo.config as cfg
import cPickle as pickle
import IPython
class TestLabeler(object):
def __init__(self):
self.cache_path = cfg.CACHE_PATH
self.image_path = cfg.IMAGE_PATH
self.label_path = cfg.LABEL_PATH
self.batch_size = cfg.BATCH_SIZE
self.image_size = cfg.IMAGE_SIZE
self.cell_size = cfg.CELL_SIZE
self.classes = cfg.CLASSES
self.class_to_ind = dict(zip(self.classes, xrange(len(self.classes))))
def check_label(self,frame):
label_path = cfg.LABEL_PATH+frame+'.p'
label_data = pickle.load(open(label_path,'r'))
for objs in label_data['objects']:
box_ind = objs['box_index']
class_label = objs['num_class_label']
print "CLASS LABEL"
print class_label
print "BOX INDEX"
print box_ind
def check_frame(self,frame):
image_path = cfg.IMAGE_PATH+frame+'.png'
image = cv2.imread(image_path)
cv2.imshow('debug',image)
cv2.waitKey(0)
def image_read(self, imname, flipped=False):
image = cv2.imread(imname)
image = cv2.resize(image, (self.image_size, self.image_size))
# cv2.imshow('debug',image)
# cv2.waitKey(30)
#image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)
image = (image / 255.0) * 2.0 - 1.0
if flipped:
image = image[:, ::-1, :]
return image
def load_bbox_annotation(self, label):
"""
Load image and bounding boxes info from XML file in the PASCAL VOC
format.
"""
label_data = pickle.load(open(label,'r'))
num_objs = label_data['num_labels']
label = np.zeros((self.cell_size, self.cell_size, 5+cfg.NUM_LABELS))
for objs in label_data['objects']:
box_ind = objs['box_index']
class_label = objs['num_class_label']
x_ind = int(box_ind[0] * self.cell_size / self.image_size)
y_ind = int(box_ind[1] * self.cell_size / self.image_size)
label[y_ind, x_ind, 0] = 1
label[y_ind, x_ind, 1:5] = box_ind
label[y_ind, x_ind, 5 + class_label] = 1
return label, num_objs
if __name__ == '__main__':
tl = TestLabeler()
frame = 'frame_1771'
tl.check_label(frame)
tl.check_frame(frame)
| [
"mdlaskey@umich.edu"
] | mdlaskey@umich.edu |
1842c2f22f63707d7611f23c86f8e2e01d22d89d | 3ce2e64a18e0d8769429a849ed61b13d6b2c0c5b | /scheduler.py | 467b7a5a5827421c072a10db8e95b39d57b2f832 | [] | no_license | xiufengliu/DLS | fa74f6fdcafba0222e0edc372cbde52334faab12 | df4d9b671c71853544130b0e7cc37a0808f6de72 | refs/heads/master | 2020-03-25T05:19:51.588425 | 2018-08-03T15:05:45 | 2018-08-03T15:05:45 | 143,441,210 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,075 | py | import schedule
import time
from app import db
def batchJob1Min():
try:
result = db.session.execute("select script from essex_job_que where seg_type=1 and next_execute_time<=now()")
scripts = result.fetchall()
if scripts:
for row in scripts:
sqlStatements = row[0].split(";")
for sql in sqlStatements:
db.session.execute(sql)
db.session.execute(
"update essex_job_que set next_execute_time=now()+interval'%s' where seg_type=1 and next_execute_time<=now()" % '1minutes')
db.session.commit()
else:
print('No pending jobs')
except Exception as e:
print(e)
def batchJob10Min():
try:
result = db.session.execute("select script from essex_job_que where seg_type=3 and next_execute_time<=now()")
scripts = result.fetchall()
if scripts:
for row in scripts:
sqlStatements = row[0].split(";")
for sql in sqlStatements:
db.session.execute(sql)
db.session.execute(
"update essex_job_que set next_execute_time=now()+interval'%s' where seg_type=3 and next_execute_time<=now()" % '10minutes')
db.session.commit()
else:
print('No pending jobs')
except Exception as e:
print(e)
#schedule.every().hour.do(job)
#schedule.every().day.at("10:30").do(job)
#schedule.every().monday.do(job)
#schedule.every().wednesday.at("13:15").do(job)
class Job:
def __init__(self):
self.started = False
schedule.every(1).minutes.do(batchJob1Min)
schedule.every(10).minutes.do(batchJob10Min)
def stop(self):
self.started = False
def start(self):
if self.started:
return
else:
self.started = True
while self.started:
schedule.run_pending()
time.sleep(1)
def status(self):
return 'Running' if self.started else 'Stopped'
job = Job() | [
"groupme@gmail.com"
] | groupme@gmail.com |
55452e8eaf3c675ee734d7d08b29328ed897b400 | 344b654cbb8b13d683bcd2cacf522c983287a5fe | /Exercises/fileExtension.py | 295ca1b77df26281183deef41448b83bb4510202 | [] | no_license | tchaitanya2288/pyproject01 | d869522584ab498008e67e81c209472ab20685c2 | 565660b73039db6f0e9ed986504c2f96ba674f9c | refs/heads/master | 2020-03-15T13:18:21.480443 | 2018-06-19T18:44:47 | 2018-06-19T18:44:47 | 132,163,324 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | Filename = input('Enter your required filename:')
Extension = Filename.split('.')
print("The Extension of file is:" +repr(Extension[-1])) | [
"tchaitanya.2288@gmail.com"
] | tchaitanya.2288@gmail.com |
23766bceb270d73585937f8eb705efca167b4426 | c3b739b07214507bf1023b926c19d30784623e98 | /segme/model/cascade_psp/refine.py | b8419f1aa09101135ce9339c1be00c9ec1fa696d | [
"MIT"
] | permissive | templeblock/segme | 20a96787500c46483cb7af0db917207fcedafb0b | 8192ed066558c1ea1e7283805b40da4baa5b3827 | refs/heads/master | 2023-08-30T12:31:39.327283 | 2021-11-11T17:08:40 | 2021-11-11T17:08:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,917 | py | import cv2
import numpy as np
import tensorflow as tf
from keras import backend
from tensorflow_hub import KerasLayer
class Refiner:
def __init__(self, hub_uri, max_size=900):
self.model = KerasLayer(hub_uri)
self.max_size = max_size
self.image = tf.Variable(
shape=(1, None, None, 3), dtype='uint8', initial_value=np.zeros((1, 0, 0, 3)).astype(np.uint8))
self.mask = tf.Variable(
shape=(1, None, None, 1), dtype='uint8', initial_value=np.zeros((1, 0, 0, 1)).astype(np.uint8))
self.prev = tf.Variable(
shape=(1, None, None, 1), dtype='uint8', initial_value=np.zeros((1, 0, 0, 1)).astype(np.uint8))
def __call__(self, image, mask, fast=False):
fine, coarse = self._global_step(image, mask)
if fast:
return fine
return self._local_step(image, fine, coarse)
def _global_step(self, image, mask):
height_width = image.shape[:2]
if max(height_width) < self.max_size:
image = Refiner._resize_max_side(image, self.max_size, cv2.INTER_CUBIC)
mask = Refiner._resize_max_side(mask, self.max_size, cv2.INTER_LINEAR)
elif max(height_width) > self.max_size:
image = Refiner._resize_max_side(image, self.max_size, cv2.INTER_AREA)
mask = Refiner._resize_max_side(mask, self.max_size, cv2.INTER_AREA)
fine, coarse = self._safe_predict(image, mask)
if max(height_width) < self.max_size:
fine = Refiner._resize_fixed_size(fine, height_width, interpolation=cv2.INTER_AREA)
coarse = Refiner._resize_fixed_size(coarse, height_width, interpolation=cv2.INTER_AREA)
elif max(height_width) > self.max_size:
fine = Refiner._resize_fixed_size(fine, height_width, interpolation=cv2.INTER_LINEAR)
coarse = Refiner._resize_fixed_size(coarse, height_width, interpolation=cv2.INTER_LINEAR)
return fine, coarse
def _local_step(self, image, fine, coarse, padding=16):
height, width = fine.shape[:2]
grid_mask = np.zeros_like(fine, dtype=np.uint32)
grid_weight = np.zeros_like(fine, dtype=np.uint32)
step_size = self.max_size // 2 - padding * 2
used_start_idx = set()
for x_idx in range(width // step_size + 1):
for y_idx in range(height // step_size + 1):
start_x = x_idx * step_size
start_y = y_idx * step_size
end_x = start_x + self.max_size
end_y = start_y + self.max_size
# Shift when required
if end_x > width:
end_x = width
start_x = width - self.max_size
if end_y > height:
end_y = height
start_y = height - self.max_size
# Bound x/y range
start_x = max(0, start_x)
start_y = max(0, start_y)
end_x = min(width, end_x)
end_y = min(height, end_y)
# The same crop might appear twice due to bounding/shifting
start_idx = start_y * width + start_x
if start_idx in used_start_idx:
continue
used_start_idx.add(start_idx)
# Take crop
part_image = image[start_y:end_y, start_x:end_x, :]
part_mask = fine[start_y:end_y, start_x:end_x]
part_prev = coarse[start_y:end_y, start_x:end_x]
# Skip when it is not an interesting crop anyway
part_mean = (part_mask > 127).astype(np.float32).mean()
if part_mean > 0.9 or part_mean < 0.1:
continue
grid_fine, _ = self._safe_predict(part_image, part_mask, part_prev)
# Padding
pred_sx = pred_sy = 0
pred_ex = self.max_size
pred_ey = self.max_size
if start_x != 0:
start_x += padding
pred_sx += padding
if start_y != 0:
start_y += padding
pred_sy += padding
if end_x != width:
end_x -= padding
pred_ex -= padding
if end_y != height:
end_y -= padding
pred_ey -= padding
grid_mask[start_y:end_y, start_x:end_x] += grid_fine[pred_sy:pred_ey, pred_sx:pred_ex]
grid_weight[start_y:end_y, start_x:end_x] += 1
# Final full resolution output
grid_weight_ = grid_weight.astype(np.float32) + backend.epsilon()
grid_mask = np.round(grid_mask.astype(np.float32) / grid_weight_).astype(np.uint8)
fine = np.where(grid_weight == 0, fine, grid_mask)
return fine
def _safe_predict(self, image, mask, prev=None):
if len(image.shape) != 3:
raise ValueError('Wrong image supplied')
if image.dtype != 'uint8':
raise ValueError('Wrong image dtype')
if len(mask.shape) != 2:
raise ValueError('Wrong mask supplied')
if mask.dtype != 'uint8':
raise ValueError('Wrong mask dtype')
if prev is not None and len(prev.shape) != 2:
raise ValueError('Wrong prev supplied')
if prev is not None and prev.dtype != 'uint8':
raise ValueError('Wrong prev dtype')
height, width = image.shape[:2]
_image = np.pad(image, ((0, height % 8), (0, width % 8), (0, 0)))
_mask = np.pad(mask, ((0, height % 8), (0, width % 8)))
_prev = _mask if prev is None else np.pad(prev, ((0, height % 8), (0, width % 8)))
self.image.assign(_image[None, ...])
self.mask.assign(_mask[None, ..., None])
self.prev.assign(_prev[None, ..., None])
fine, coarse = self.model([self.image, self.mask, self.prev])
fine, coarse = fine[0, :height, :width, 0], coarse[0, :height, :width, 0]
fine = np.round(fine * 255).astype(np.uint8)
coarse = np.round(coarse * 255).astype(np.uint8)
return fine, coarse
@staticmethod
def _resize_max_side(image, max_size, interpolation=cv2.INTER_LINEAR):
if len(image.shape) > 3 or len(image.shape) < 2:
raise ValueError('Wrong image supplied')
aspect = max_size / max(image.shape[:2])
return cv2.resize(image, (0, 0), fx=aspect, fy=aspect, interpolation=interpolation)
@staticmethod
def _resize_fixed_size(image, height_width, interpolation=cv2.INTER_LINEAR):
if len(image.shape) > 3 or len(image.shape) < 2:
raise ValueError('Wrong image supplied')
if len(height_width) != 2:
raise ValueError('Wrong desired size supplied')
return cv2.resize(image, height_width[::-1], interpolation=interpolation)
| [
"shkarupa.alex@gmail.com"
] | shkarupa.alex@gmail.com |
34ad15bcc03e457d2b3ad2ccd42aae05b50b0160 | 7d310e0ab34c9d33b11202f60188202330fcacf8 | /Basic_grammer/for/for_star_tree.py | 15da12c0a32c0c40e2a4312f3896a6e5eb831b31 | [] | no_license | minseung73/kms | 5010f580acaa38fade4b262564a22cd68ced9a71 | 946b7d09ebeb6b4555535c169ce1469144e0eb7a | refs/heads/master | 2020-08-29T12:23:09.628838 | 2019-11-25T12:35:58 | 2019-11-25T12:35:58 | 218,029,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 75 | py | for i in range(6):
print("*"*i)
for i in range(6):
print("*"*(6-i)) | [
"bria051@naver.com"
] | bria051@naver.com |
1bfc305c082437bc2aadcf82bad18fc7287eca39 | 86afc6b35e262e60e7758f05884558389bfd388e | /day_15.py | 974eaf8959682a22266011f9a49a911c13c65c88 | [] | no_license | flbdx/AoC_2019 | c7c633fe615716a3ef2b20db0fabfa26b852cee1 | 5bb06e8b6a57cf8dc9a3a2d0f132af301a335abf | refs/heads/master | 2022-12-26T11:46:42.129493 | 2022-12-09T22:29:08 | 2022-12-09T22:29:08 | 226,040,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,666 | py | #!/usr/bin/python3
#encoding: UTF-8
import fileinput
import sys
from intcodecomp import IntComputer
import enum
if len(sys.argv) == 1:
sys.argv += ["input_15"]
class LabBot(object):
class Direction(enum.Enum):
NORTH = 1
SOUTH = 2
WEST = 3
EAST = 4
def turn_left(self):
if self == LabBot.Direction.NORTH:
return LabBot.Direction.WEST
if self == LabBot.Direction.WEST:
return LabBot.Direction.SOUTH
if self == LabBot.Direction.SOUTH:
return LabBot.Direction.EAST
if self == LabBot.Direction.EAST:
return LabBot.Direction.NORTH
def turn_right(self):
if self == LabBot.Direction.NORTH:
return LabBot.Direction.EAST
if self == LabBot.Direction.WEST:
return LabBot.Direction.NORTH
if self == LabBot.Direction.SOUTH:
return LabBot.Direction.WEST
if self == LabBot.Direction.EAST:
return LabBot.Direction.SOUTH
def prog_to_memory(self, prog):
n = 0
res = {}
for v in prog.split(','):
res[n] = int(v)
n += 1
return res
def __init__(self, program):
self.comp = IntComputer()
mem = self.prog_to_memory(program)
self.comp.set_mem(mem)
self.world = {(0, 0): 1}
self.distances = {(0, 0): 0}
self.pos = [0, 0]
self.target = None
self.direction = LabBot.Direction.NORTH
self.distance = 0
def walk_p1(self):
steps = 0
while steps < 3500:
self.comp.push_input(self.direction.value)
self.comp.run(break_after_output=True)
res = self.comp.read_output()
#print((self.pos, self.direction, res))
if res == 0: # WALL
t = None
if self.direction == LabBot.Direction.NORTH:
t = (self.pos[0], self.pos[1] + 1)
elif self.direction == LabBot.Direction.SOUTH:
t = (self.pos[0], self.pos[1] - 1)
elif self.direction == LabBot.Direction.EAST:
t = (self.pos[0] + 1, self.pos[1])
else:
t = (self.pos[0] - 1, self.pos[1])
self.world[t] = 0
self.direction = self.direction.turn_left()
elif res == 1 or res == 2: # MOVED 1 STEP
if self.direction == LabBot.Direction.NORTH:
self.pos[1] += 1
elif self.direction == LabBot.Direction.SOUTH:
self.pos[1] -= 1
elif self.direction == LabBot.Direction.EAST:
self.pos[0] += 1
else:
self.pos[0] -= 1
self.direction = self.direction.turn_right()
self.world[tuple(self.pos)] = res
d = self.distance + 1
rec_d = self.distances.get(tuple(self.pos), None)
if rec_d == None:
self.distances[tuple(self.pos)] = d
self.distance = d
elif d < rec_d:
self.distances[tuple(self.pos)] = d
self.distance = d
elif rec_d < d:
self.distance = rec_d
if res == 2:
self.target = self.pos.copy()
steps += 1
min_x = min([p[0] for p in self.world.keys()])
max_x = max([p[0] for p in self.world.keys()])
min_y = min([p[1] for p in self.world.keys()])
max_y = max([p[1] for p in self.world.keys()])
#max_d = max(self.distances.values()) + 1
s = ""
for y in range(max_y, min_y - 1, -1):
for x in range(min_x, max_x + 1):
if x == 0 and y == 0:
s += 'o'
else:
v = self.world.get((x, y), -1)
if v == 0:
s += '#'
elif v == 1:
s += ' '
#s += repr((self.distances[(x, y)] * 10) // max_d)
elif v == 2:
s += 'X'
else:
s += '?'
s += "\n"
print(s)
def walk_p2(self):
steps = 0
while steps < 3500:
self.comp.push_input(self.direction.value)
self.comp.run(break_after_output=True)
res = self.comp.read_output()
if res == 0: # WALL
t = None
if self.direction == LabBot.Direction.NORTH:
t = (self.pos[0], self.pos[1] + 1)
elif self.direction == LabBot.Direction.SOUTH:
t = (self.pos[0], self.pos[1] - 1)
elif self.direction == LabBot.Direction.EAST:
t = (self.pos[0] + 1, self.pos[1])
else:
t = (self.pos[0] - 1, self.pos[1])
self.world[t] = 0
self.direction = self.direction.turn_left()
elif res == 1 or res == 2: # MOVED 1 STEP
if self.direction == LabBot.Direction.NORTH:
self.pos[1] += 1
elif self.direction == LabBot.Direction.SOUTH:
self.pos[1] -= 1
elif self.direction == LabBot.Direction.EAST:
self.pos[0] += 1
else:
self.pos[0] -= 1
self.direction = self.direction.turn_right()
self.world[tuple(self.pos)] = res
if res == 2:
self.target = self.pos.copy()
break
steps += 1
self.distances = {tuple(self.target): 0}
steps = 0
while steps < 3500:
self.comp.push_input(self.direction.value)
self.comp.run(break_after_output=True)
res = self.comp.read_output()
if res == 0: # WALL
self.direction = self.direction.turn_left()
elif res == 1 or res == 2: # MOVED 1 STEP
if self.direction == LabBot.Direction.NORTH:
self.pos[1] += 1
elif self.direction == LabBot.Direction.SOUTH:
self.pos[1] -= 1
elif self.direction == LabBot.Direction.EAST:
self.pos[0] += 1
else:
self.pos[0] -= 1
self.direction = self.direction.turn_right()
d = self.distance + 1
rec_d = self.distances.get(tuple(self.pos), None)
if rec_d == None:
self.distances[tuple(self.pos)] = d
self.distance = d
elif d < rec_d:
self.distances[tuple(self.pos)] = d
self.distance = d
elif rec_d < d:
self.distance = rec_d
if res == 2:
self.target = self.pos.copy()
break
steps += 1
def p1():
for line in fileinput.input():
bot = LabBot(line)
bot.walk_p1()
print(bot.distances[tuple(bot.target)])
p1()
def p2():
for line in fileinput.input():
bot = LabBot(line)
bot.walk_p2()
print(max(bot.distances.values()))
p2()
| [
"46577074+flbdx@users.noreply.github.com"
] | 46577074+flbdx@users.noreply.github.com |
c7db867a68cfc633338475e43990083bb406cd98 | 1564d12d61f669ce9f772f3ef7563167f7fe13bf | /codeforces/educationalRound73/A-books.py | 77e50053332612a3e54fa06049612ac125655ecd | [] | no_license | sakshamk6999/codingPractice | 73ec4873defb0f0d2e47173150a589ee12e5e0a1 | f727aac6d87448b19fc9d48660dc6978fe5edc14 | refs/heads/master | 2020-12-01T20:22:36.299535 | 2020-02-04T05:55:53 | 2020-02-04T05:55:53 | 230,757,937 | 0 | 0 | null | 2020-02-12T20:38:12 | 2019-12-29T14:00:22 | Python | UTF-8 | Python | false | false | 316 | py | for _ in range(int(input())):
n = int(input())
a = list(map(int, input().split()))
dp = [0 for i in range(n)]
for i in range(n - 2, -1, -1):
if a[i] == a[i + 1]:
dp[i] = dp[i + 1]
else:
dp[i] = n - 1 - i
for i in dp:
print(i, end=" ")
print('') | [
"sakshamkhatwani@gmail.com"
] | sakshamkhatwani@gmail.com |
4b7d04c5de2f897b35e6ea61fc5a14077a9d6ef7 | 9f91ce42e1982ded6f77e184a0c6e35331b9ad23 | /greedy_color/main.py | 9308c47eb7dcc321bf983e03e6c97dfc36b2951d | [
"MIT"
] | permissive | dixler/graph-coloring | b5b1b5aeb91d24ba4f94fc1b837225019327c885 | 6a5e853b9a88bdddfd8a02c75dfe588f26eddaba | refs/heads/master | 2020-04-10T14:17:53.701941 | 2018-12-15T09:44:36 | 2018-12-15T09:44:36 | 161,073,490 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,551 | py | #!/usr/bin/env python3
# gonna make a stupid algorithm
import json
import sys
num_colors = 0
graph = json.loads(input())
num_nodes = len(graph)
class Node():
def __init__(self):
self.color = None
self.neighbors = set()
nodes = {int(i): Node() for i, val in graph.items()}
# add edges to graph
for k, val in graph.items():
nodes[int(k)].neighbors = set(val)
# add inbound edges
for k, adj_list in graph.items():
for endpoint in adj_list:
nodes[endpoint].neighbors |= {int(k)}
def recursive_color(graph, start_index):
'determines the color of interconnected nodes'
global num_colors
node = graph[start_index]
if node.color != None:
'we already colored it'
return
else:
neighbor_colors = {graph[neighbor_id].color for neighbor_id in node.neighbors}
new_color_id = 0
while new_color_id in neighbor_colors:
new_color_id += 1
node.color = new_color_id
num_colors = max(num_colors, new_color_id+1)
for neighbor_id in node.neighbors:
recursive_color(graph, neighbor_id)
return
# make a stack of unvisited nodes
graph = {int(k): v for k, v in graph.items()}
unvisited = {k for k, v in graph.items()}
while unvisited != set():
start_index = max(unvisited)
recursive_color(nodes, start_index)
unvisited = unvisited - {k for k, node in nodes.items() if node.color != None}
print('satisfiable with %d colors' % num_colors)
for k, node in nodes.items():
print((k, node.color), end=', ')
| [
"you@example.com"
] | you@example.com |
f39ba693f9984287400dc51c6fd3384c2c8d4aad | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/92/usersdata/216/46367/submittedfiles/atividade.py | 6f93a371a202140a4fcb7fb058a09a066cd9d666 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | # -*- coding: utf-8 -*-
import math
n=int(input('Digite um número:'))
soma=0
if n>0:
for i in range(0,n,1):
if n>=0:
i=i+1
soma=soma+((i)/(n))
n=n-1
else:
n=n*(-1)
i=i+1
soma=soma+((i)/(n))
n=n-1
else:
n=n*(-1)
print('%.5f'%soma)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
1b5dde44a062a74cb90f2e60d15903012ccb7620 | eff2fc11905f6118dcd70050392f168cd7aea086 | /leetcode/5_longest_palindromic_substring/solution2.py | dc6f8c44f995cff0b89286e6dbc72af866bea932 | [] | no_license | algobot76/leetcode-python | 28f1e1107fa941a3b40006f074eec6231e674ac1 | ec8bff8978d6915bfdf187c760b97ee70f7515af | refs/heads/master | 2021-07-05T17:06:40.581977 | 2020-09-19T22:02:38 | 2020-09-19T22:02:38 | 199,255,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 678 | py | class Solution:
def longestPalindrome(self, s):
n = len(s)
if n < 2:
return s
f = [[False] * n for _ in range(n)]
ans = ""
for i in range(n - 1, -1, -1):
for j in range(i, n):
if s[i] == s[j]:
if self._get_len(i, j) > 2:
if f[i + 1][j - 1]:
f[i][j] = True
else:
f[i][j] = True
if f[i][j]:
if self._get_len(i, j) > len(ans):
ans = s[i:j + 1]
return ans
def _get_len(self, i, j):
return j - i + 1
| [
"xkaitian@gmail.com"
] | xkaitian@gmail.com |
e088b87c645c90af9868abefc5d1e8640ba3ecb5 | be68da7fcdf533e58970897f91a78f12a44b720c | /FSMonitor.py | 5ad6d4edf4cfc3d2c2e8bf200efea37f67e79315 | [] | no_license | lishuaijie0816/FSMonitor | 809e811b9a5fe2f38eead04183f5cd594fad1934 | dcf374db62ed9487103468af61088a77260bdc1c | refs/heads/master | 2020-06-21T03:39:05.313957 | 2019-07-17T08:03:29 | 2019-07-17T08:03:29 | 197,334,962 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,126 | py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
# Event Name Is an Event Description
# IN_ACCESS Yes file was accessed.
# IN_ATTRIB Yes metadata changed.
# IN_CLOSE_NOWRITE Yes unwrittable file was closed.
# IN_CLOSE_WRITE Yes writtable file was closed.
# IN_CREATE Yes file/dir was created in watched directory.
# IN_DELETE Yes file/dir was deleted in watched directory.
# IN_DELETE_SELF Yes 自删除,即一个可执行文件在执行时删除自己
# IN_DONT_FOLLOW No don’t follow a symlink (lk 2.6.15).
# IN_IGNORED Yes raised on watched item removing. Probably useless for you, prefer instead IN_DELETE*.
# IN_ISDIR No event occurred against directory. It is always piggybacked to an event. The Event structure automatically provide this information (via .is_dir)
# IN_MASK_ADD No to update a mask without overwriting the previous value (lk 2.6.14). Useful when updating a watch.
# IN_MODIFY Yes file was modified.
# IN_MOVE_SELF Yes 自移动,即一个可执行文件在执行时移动自己
# IN_MOVED_FROM Yes file/dir in a watched dir was moved from X. Can trace the full move of an item when IN_MOVED_TO is available too, in this case if the moved item is itself watched, its path will be updated (see IN_MOVE_SELF).
# IN_MOVED_TO Yes file/dir was moved to Y in a watched dir (see IN_MOVE_FROM).
# IN_ONLYDIR No only watch the path if it is a directory (lk 2.6.15). Usable when calling .add_watch.
# IN_OPEN Yes file was opened.
# IN_Q_OVERFLOW Yes event queued overflowed. This event doesn’t belongs to any particular watch.
# IN_UNMOUNT Yes 宿主文件系统被 umount
import os
from pyinotify import WatchManager, Notifier, \
ProcessEvent, IN_DELETE, IN_CREATE, IN_MODIFY, IN_CLOSE_WRITE
def call_back(file):
print("call back at file:{}".format(file))
class EventHandler(ProcessEvent):
"""事件处理"""
def __init__(self, fun):
self.fun = fun
def process_IN_CREATE(self, event):
print(
"Create file: %s " % os.path.join(event.path, event.name))
def process_IN_DELETE(self, event):
print(
"Delete file: %s " % os.path.join(event.path, event.name))
def process_IN_MODIFY(self, event):
print(
"Modify file: %s " % os.path.join(event.path, event.name))
def process_IN_CLOSE_WRITE(self, event):
print(
"Close Write file: %s " % os.path.join(event.path, event.name))
if self.fun != None:
self.fun(os.path.join(event.path, event.name))
def FSMonitor(path='.', fun=None):
wm = WatchManager()
mask = IN_DELETE | IN_CREATE | IN_MODIFY | IN_CLOSE_WRITE
notifier = Notifier(wm, EventHandler(fun))
wm.add_watch(path, mask, rec=True)
print('now starting monitor %s' % (path))
while True:
try:
notifier.process_events()
if notifier.check_events():
notifier.read_events()
except KeyboardInterrupt:
notifier.stop()
break
if __name__ == "__main__":
FSMonitor(path=".", fun=call_back)
| [
"noreply@github.com"
] | noreply@github.com |
bbed6ffc0de45ccedf5d739d5a96324c1eab399c | f67c3312d92a87c8f5858fac5e82970a012bfcbe | /squares/controllers/table.py | 744ab3709d774875397d838b265094057ff5eb83 | [
"MIT"
] | permissive | DestroyingWind/Squares | ae3268e11e4cb2291e2c2e7a2806ea38f55c14c4 | 3b331c38b577e67e5c08ab24f2900ed73d88c467 | refs/heads/master | 2021-01-21T16:35:41.500027 | 2017-06-01T09:46:48 | 2017-06-01T09:46:48 | 91,899,568 | 0 | 0 | null | 2017-05-20T15:51:16 | 2017-05-20T15:51:16 | null | UTF-8 | Python | false | false | 3,060 | py | from squares.models.play.table import Table
from squares.models.schema import get_axis_by_schema_id
from squares.errors.table import OutRangeError, TakeError
class TableController:
TABLE_MAX = 1024
TABLE_ID = 'table_id_{}'
def __init__(self, table_id, player_id=0):
self.table_id = table_id
self.table = Table.get_by_id(table_id)
self.player_id = player_id
self.players = self.table.players
@property
def player_n(self):
if self.player_id:
for index, player_id in enumerate(self.players):
if player_id == self.player_id:
return index + 1
@property
def turn(self):
return self.table.turn
@property
def is_owner(self):
return self.player_n == 1
@property
def is_start(self):
return self.table.is_started
def start(self):
if self.player_n != 1:
raise TakeError('Only onwer can start the game!')
if self.player_id != self.table.owner:
raise TakeError('Your are not the onwer!')
self.table.start()
def join(self, player_id):
self.table.join(player_id)
self.player_id = player_id
def step(self, schema_id, position, rotate=0, symmetry=False):
axises = get_axis_by_schema_id(schema_id, position, rotate, symmetry)
self._check(axises)
self.table.step(axises, self.player_n)
def quit(self):
self.table.quit(self.player_id)
def _check(self, axises):
self.is_opposite = False
for item in axises:
self.is_legal(item)
if not self.is_opposite:
raise TakeError('Must be in the opposite of your chess!')
def is_legal(self, axis):
if not self._check_out(axis[0]) or not self._check_out(axis[1]):
raise OutRangeError('Out of range!')
if self.squares[axis[0]][axis[1]]:
raise TakeError('Wrong location!')
self._check_touch(axis)
self._check_opposite(axis)
def _check_out(self, index):
return 0 < index < len(self.squares)
def _check_touch(self, axis):
for op in _touch:
new_ax = [axis[0] + op[0], axis[1] + op[1]]
if self._check_out(new_ax):
if self.player_n == self._chess_n(new_ax):
raise TakeError('Adjacent to your chess!')
return True
def _check_opposite(self, axis):
for op in _opposite:
new_ax = [axis[0] + op[0], axis[1] + op[1]]
if self._check_out(new_ax):
if self.player_n == self._chess_n(new_ax):
self.is_opposite = True
break
def _chess_n(self, axis):
return self.squares[axis[0]][axis[1]]
@property
def squares(self):
return self.table.situation()
@property
def status(self):
return self.table.status
_touch = [
[0, -1],
[0, 1],
[1, 0],
[-1, 0],
]
_opposite = [
[-1, -1],
[1, 1],
[-1, 1],
[1, -1],
]
| [
"wangtao101rs@163.com"
] | wangtao101rs@163.com |
e5389b6f96d7b53ab8a61f670edde6d6581d4925 | 1334014eaeffdde4886da49ce5f387c46a5cc23a | /2020-2/kakao/2020/Senario2.py | 82fc1c3d2689193e3ffe636a1ac40926025d5ebe | [] | no_license | gusah009/Algorithm | a85b1a4526a885b1f809c23c55565f453a5294c4 | 5da952e16a6f90f663706675812f042707aa7280 | refs/heads/main | 2023-07-31T17:28:20.404630 | 2021-09-05T06:14:00 | 2021-09-05T06:14:00 | 357,753,076 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,900 | py | import requests
import pprint
import json
url = 'https://pegkq2svv6.execute-api.ap-northeast-2.amazonaws.com/prod/users'
global LEN
LEN = 60
global BOARD_SIZE
BOARD_SIZE = LEN * LEN
global moving
moving = [1,5,-1,-5]
global move
move = [True for i in range(10)]
global pp
pp = pprint.PrettyPrinter(indent=2)
def start(X_AUTH_TOKEN, problem):
headers = {
'X-Auth-Token': X_AUTH_TOKEN,
'Content-Type': 'application/json',
}
data = '{ "problem": '+ str(problem) + ' }'
uri = url + '/start'
return requests.post(uri, headers=headers, data=data).json()
def locations(token):
uri = url + '/locations'
return requests.get(uri, headers={'Authorization': token}).json()
def trucks(token):
uri = url + '/trucks'
return requests.get(uri, headers={'Authorization': token}).json()
def simulator(token, data):
headers = {
'Authorization': token,
'Content-Type': 'application/json',
}
uri = url + '/simulate'
return requests.put(uri, headers=headers, data=data)
def score(token):
uri = url + '/score'
return requests.get(uri, headers={'Authorization': token}).json()
class Truck():
def __init__(self, id):
self.id = int(id)
def trcCommand1(self, token):
command = []
loc = locations(token)
trc = trucks(token)
ID = trc['trucks'][self.id]['location_id']
cnt = loc['locations'][ID]['located_bikes_count']
count = cnt - 3
c = 0
state = False
while c < 10: # Truck 1분동안 할 수 있는 행동 수
while count > 0:
if c == 10:
break
command.append(5)
# pp.pprint(loc['locations'][ID]['located_bikes_count'])
count -= 1
c += 1
if trc['trucks'][self.id]['loaded_bikes_count'] != 0:
while count < 0:
if c == 10:
break
command.append(6)
# pp.pprint(loc['locations'][ID]['located_bikes_count'])
count += 1
c += 1
if c == 10:
break
if move[self.id]:
if ID == BOARD_SIZE - 1:
command.append(3)
ID += moving[2]
move[self.id] = False
elif ID % (LEN * 2) == 4 or ID % (LEN * 2) == 5:
command.append(2)
ID += moving[1]
elif ID % (LEN * 2) < 60:
command.append(1)
ID += moving[0]
elif 60 <= (ID % (LEN * 2)):
command.append(3)
ID += moving[2]
else:
if ID == 0:
command.append(1)
ID += moving[0]
move[self.id] = True
elif ID % (LEN * 2) == 0 or ID % (LEN * 2) == 9:
command.append(4)
ID += moving[3]
elif ID % (LEN * 2) < 60:
command.append(3)
ID += moving[2]
elif 60 <= (ID % (LEN * 2)):
command.append(1)
ID += moving[0]
c += 1
cnt = loc['locations'][ID]['located_bikes_count']
count = cnt - 3
# print(self.id , " " , command)
return command
def trcCommand2(self, token):
command = []
loc = locations(token)
trc = trucks(token)
ID = trc['trucks'][self.id]['location_id']
cnt = loc['locations'][ID]['located_bikes_count']
count = loc['locations'][ID]['located_bikes_count'] - 3
for i in range(10): # Truck 1분동안 할 수 있는 행동 수
if count > 0:
command.append(5)
# pp.pprint(loc['locations'][ID]['located_bikes_count'])
count -= 1
elif count < 0:
command.append(6)
# pp.pprint(loc['locations'][ID]['located_bikes_count'])
count += 1
else:
if move[self.id]:
if ID == BOARD_SIZE - 1:
command.append(3)
ID += moving[2]
move[self.id] = False
elif ID % (LEN * 2) == 4 or ID % (LEN * 2) == 5:
command.append(2)
ID += moving[1]
elif ID % (LEN * 2) < 5:
command.append(1)
ID += moving[0]
elif 5 <= ID % (LEN * 2):
command.append(3)
ID += moving[2]
else:
if ID == 0:
command.append(1)
ID += moving[0]
move[self.id] = True
elif ID % (LEN * 2) == 0 or ID % (LEN * 2) == 9:
command.append(4)
ID += moving[3]
elif ID % (LEN * 2) < 5:
command.append(3)
ID += moving[2]
elif 5 <= ID % (LEN * 2):
command.append(1)
ID += moving[0]
cnt = loc['locations'][ID]['located_bikes_count']
count = loc['locations'][ID]['located_bikes_count'] - 4
def p0_simulator():
X_Auth_Token = 'dd6ac65a77bd2045ebabf7e2357d7f2c'
problem = 2
ret = start(X_Auth_Token, problem)
token = ret['auth_key']
print(ret)
trc = []
for i in range(10):
trc.append(Truck(i))
for k in range(2):
command = []
data = '{ "commands": [ '
for i in range(10):
for j in range(10):
if j < i:
command.append(2)
else:
command.append(0)
if i != 9:
data += '{ "truck_id": '+ str(i) +', "command": '+ str(command) +' },'
else:
data += '{ "truck_id": '+ str(i) +', "command": '+ str(command) +' }'
data += ' ] }'
sim = simulator(token, data)
print(sim.content)
while(sim.content[11:16] == b'ready'):
data = '{ "commands": [ '
for i in range(5):
command = trc[i].trcCommand1(token)
if i != 4:
data += '{ "truck_id": '+ str(i) +', "command": '+ str(command) +' },'
else:
data += '{ "truck_id": '+ str(i) +', "command": '+ str(command) +' }'
data += ' ] }'
sim = simulator(token, data)
pp.pprint(sim.content)
# trc = trucks(token)
# pp.pprint(trc['trucks'])
pp.pprint(score(token))
if __name__ == '__main__':
p0_simulator()
| [
"minion@bmeks.co.kr"
] | minion@bmeks.co.kr |
2eedf0e0dbb2f5a75909791d40dfecad3658332e | d12847b0deaaaf640bf192f1e06e53dd281f98e7 | /analysis/fitting.py | f24c854b205e9977d5317a0ba98420579e0724b6 | [] | no_license | HelmutFedder/pi3diamond | 3fe9e727db68c3339610d58121e8bb6463e47cbe | 2d5c1292039b0717a1328d5e2b60ce433e8f3a82 | refs/heads/master | 2021-01-11T03:22:55.570737 | 2016-10-15T21:54:55 | 2016-10-15T21:54:55 | 71,015,077 | 10 | 11 | null | null | null | null | UTF-8 | Python | false | false | 20,417 | py | """
This file is part of pi3diamond, a toolkit for
confocal scanning, anti-bunching, FLIM, pulsed ODMR / NMR,
and more sophisticated quantum physics experiments,
typically performed with NV centers in diamond,
written in python using the enthought traits packages.
pi3diamond is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
pi3diamond is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with diamond. If not, see <http://www.gnu.org/licenses/>.
Copyright (C) 2009-2016 Helmut Fedder <helmut@fedder.net>
"""
import numpy as np
from scipy.optimize import leastsq
from scipy.special import gammaincc
from scipy.stats import mode
########################################################
# utility functions
########################################################
def baseline(y,n=None):
"""
Returns the baseline of 'y'. 'n' controls the discretization.
The difference between the maximum and the minimum of y is discretized into 'n' steps.
"""
if not n: # estimate a useful number for the histogram bins from shot noise
y_min = y.min()
y_max = y.max()
y_typ = np.max(np.abs((y_min,y_max)))
n = (y_max-y_min)/y_typ**0.5
hist, bin_edges = np.histogram(y,int(n))
return bin_edges[hist.argmax()]
def find_edge(y,bins=20):
"""Returns edge of a step function"""
h,b=np.histogram(y,bins=bins)
i0 = bins/2
i = h[i0:].argmax()+i0
threshold = 0.5*(b[0]+b[i])
return np.where(y>threshold)[0][0]
def run_sum(y, n=10):
"""Calculates the running sum over 'y' (1D array) in a window with 'n' samples."""
N = len(y)
yp = np.empty(N)
for i in range(N):
if i+n > N:
yp[i]=yp[N-n] # pad the last array entries with the last real entry
else:
yp[i]=np.sum(y[i:i+n])
return yp
########################################################
# non-linear least square fitting
########################################################
def fit(x, y, model, estimator):
"""Perform least-squares fit of two dimensional data (x,y) to model 'Model' using Levenberg-Marquardt algorithm.\n
'Model' is a callable that takes as an argument the model parameters and returns a function representing the model.\n
'Estimator' can either be an N-tuple containing a starting guess of the fit parameters, or a callable that returns a respective N-tuple for given x and y."""
if callable(estimator):
#return leastsq(lambda pp: model(*pp)(x) - y, estimator(x,y), warning=False)[0]
return leastsq(lambda pp: model(*pp)(x) - y, estimator(x,y))[0]
else:
#return leastsq(lambda pp: model(*pp)(x) - y, estimator, warning=False)[0]
return leastsq(lambda pp: model(*pp)(x) - y, estimator)[0]
def nonlinear_model(x, y, s, model, estimator, message=False):
"""Performs a non-linear least-squares fit of two dimensional data and a primitive error analysis.
parameters:
x = x-data
y = y-data
s = standard deviation of y
model = the model to use for the fit. must be a factory function
that takes as parameters the parameters to fit and returns
a function y(x)
estimator = either an n-tuple (or array) containing the starting guess
of the fit parameters or a callable that takes x and y
as arguments and returns a starting guess
return values:
p = set of parameters that minimizes the chisqr
cov = covariance matrix
q = probability of obtaining a chisqr larger than the observed one
if 0.9 > q > 0.1 the fit is credible
if q > 0.001, the fit may be credible if we expect that the
reason for the small q are non-normal distributed errors
if q < 0.001, the fit must be questioned. Possible causes are
(i) the model is not suitable
(ii) the standard deviations s are underestimated
(iii) the standard deviations s are not normal distributed
if q > 0.9, the fit must be questioned. Possible causes are
(i) the standard deviations are overestimated
(ii) the data has been manipulated to fit the model
chisqr0 = sum over chisqr evaluated at the minimum
"""
chisqr = lambda p: ( model(*p)(x) - y ) / s
if callable(estimator):
p = estimator(x,y)
else:
p = estimator
result = leastsq(chisqr, p, full_output=True)
if message:
print result[4], result[3]
p = result[0]
cov = result[1]
# there are some cases where leastsq doesn't raise an exception, however returns None for
# the covariance matrix. To prevent 'invalid index' errors in functions that call nonlinear_model,
# we replace the 'None' by a matrix with right dimension filled with np.NaN.
if cov is None:
cov = np.NaN * np.empty( (len(p),len(p)) )
chi0 = result[2]['fvec']
chisqr0 = np.sum(chi0**2)
nu = len(x) - len(p)
q = gammaincc(0.5*nu,0.5*chisqr0)
return p, cov, q, chisqr0
########################################################
# standard factory function for non-linear fitting
########################################################
def Cosinus(a, T, c):
"""Returns a Cosinus function.
f = a\cos(2\pi(x-x0)/T)+c
Parameter:
a = amplitude
T = period
x0 = position
c = offset in y-direction
"""
return lambda x: a*np.cos( 2*np.pi*x/float(T) ) + c
setattr(Cosinus, 'formula', r'$cos(c,a,T;x)=a\cos(2\pi x/T)+c$')
def CosinusEstimator(x, y):
c = y.mean()
a = 2**0.5 * np.sqrt( ((y-c)**2).sum() )
# better to do estimation of period from
Y = np.fft.fft(y)
N = len(Y)
D = float(x[1] - x[0])
i = abs(Y[1:N/2+1]).argmax()+1
T = (N * D) / i
return a, T, c
def CosinusNoOffset(a, T):
"""Returns a Cosinus function without constant offset.
f = a\cos(2\pi(x-x0)/T)
Parameter:
a = amplitude
T = period
x0 = position
"""
return lambda x: a*np.cos( 2*np.pi*x/float(T) )
setattr(CosinusNoOffset, 'formula', r'$cos(a,T;x)=a\cos(2\pi x/T)$')
def CosinusNoOffsetEstimator(x, y):
a = 2**0.5 * np.sqrt( (y**2).sum() )
# better to do estimation of period from
Y = np.fft.fft(y)
N = len(Y)
D = float(x[1] - x[0])
i = abs(Y[1:N/2+1]).argmax()+1
T = (N * D) / i
return a, T
def ExponentialZero(a, w, c):
"""Exponential centered at zero.
f = a*exp(-x/w) + c
Parameter:
a = amplitude
w = width
c = offset in y-direction
"""
return lambda x: a*np.exp(-x/w)+c
def ExponentialZeroEstimator(x, y):
"""Exponential Estimator without offset. a*exp(-x/w) + c"""
c=y[-1]
a=y[0]-c
w=x[-1]*0.5
return a, w, c
def GaussianZero(a, w, c):
"""Gaussian function centered at zero.
f = a*exp(-(x/w)**2) + c
Parameter:
a = amplitude
w = width
c = offset in y-direction
"""
return lambda x: a*np.exp( -(x/w)**2 ) + c
setattr(GaussianZero, 'formula', r'$f(a,w,c;x)=a\exp(-(x/w)^2)+c$')
def GaussianZeroEstimator(x, y):
"""Estimator for GaussianZero: a*exp(-0.5*(x/w)**2) + c"""
c=y[-1]
a=y[0]-c
w=x[-1]*0.5
return a, w, c
def Gaussian(c, a, x0, w):
"""Gaussian function.
f = a*exp( -0.5(x-x0)**2 / w**2 ) + c
Parameter:
a = amplitude
w = width
c = offset in y-direction
"""
return lambda x: c + a*np.exp( -0.5*((x-x0)/w)**2 )
setattr(Gaussian, 'formula', r'$f(c,a,x0,w;x)=c+a\exp(-0.5(x-x0)^2/w^2)$')
def ExponentialPowerZero(a, w, p, c):
"""Exponential decay with variable power centered at zero.
f = a*exp(-(x/w)**p) + c
Parameter:
a = amplitude
w = width
p = power
c = offset in y-direction
"""
return lambda x: a*np.exp( -(x/w)**p ) + c
setattr(ExponentialPowerZero, 'formula', r'$f(a,w,p,c;x)=a\exp(-(x/w)^p)+c$')
def ExponentialPowerZeroEstimator(x, y):
"""Estimator for exponential decay with variable offset."""
c=y[-1]
a=y[0]-c
w=x[-1]*0.5
return a, w, 2, c
def GaussianZeroEstimator(x, y):
"""Gaussian Estimator without x offset. c+ a*exp( -0.5*(x/w)**2)"""
a=y.argmax()
#x0=x[y.argmax()]
w=x[(len(x)/2)]
c=(min(y)+max(y))/2
return a, w, c
def DoubleGaussian(a1, a2, x01, x02, w1, w2):
"""Gaussian function with offset."""
return lambda x: a1*np.exp( -0.5*((x-x01)/w1)**2 ) + a2*np.exp( -0.5*((x-x02)/w2)**2 )
setattr(DoubleGaussian, 'formula', r'$f(c,a1, a2,x01, x02,w1,w2;x)=a_1\exp(-0.5((x-x_{01})/w_1)^2)+a_2\exp(-0.5((x-x_{02})/w_2)^2)$')
def DoubleGaussianEstimator(x, y):
center = (x*y).sum() / y.sum()
ylow = y[x < center]
yhigh = y[x > center]
x01 = x[ylow.argmax()]
x02 = x[len(ylow)+yhigh.argmax()]
a1 = ylow.max()
a2 = yhigh.max()
w1 = w2 = center**0.5
return a1, a2, x01, x02, w1, w2
# important note: lorentzian can also be parametrized with an a' instead of a,
# such that a' is directly related to the amplitude (a'=f(x=x0)). In this case a'=a/(pi*g)
# and f = a * g**2 / ( (x-x0)**2 + g**2 ) + c.
# However, this results in much poorer fitting success. Probably the g**2 in the numerator
# causes problems in Levenberg-Marquardt algorithm when derivatives
# w.r.t the parameters are evaluated. Therefore it is strongly recommended
# to stick to the parametrization given below.
# The amplitude is a/(pi*g), the area under the curve is 'a'
def Lorentzian(c, x0, g, a):
"""Lorentzian centered at x0, with area a, offset y0 and HWHM g."""
return lambda x: a / np.pi * ( g / ( (x-x0)**2 + g**2 ) ) + c
setattr(Lorentzian, 'formula', r'$f(x0,g,a,c;x)=a/\pi (g/((x-x_0)^2+g^2)) + c$')
def LorentzianNoOffset(x0, g, a):
"""Lorentzian centered at x0, with amplitude a, and HWHM g."""
return lambda x: a / np.pi * ( g / ( (x-x0)**2 + g**2 ) )
def Nlorentzians(*p):
N = (len(p)-1)/3
def f(x):
y = p[0]*np.ones(x.shape)
i = 0
for i in range(N):
y += LorentzianNoOffset(*p[i*3+1:i*3+4])(x)
return y
return f
def LorentzianEstimator(x, y):
c = mode(y)[0][0]
yp = y - c
Y = np.sum(yp) * (x[-1] - x[0]) / len(x)
ymin = yp.min()
ymax = yp.max()
if ymax > abs(ymin):
y0 = ymax
else:
y0 = ymin
x0 = x[y.argmin()]
g = Y / (np.pi * y0)
a = y0 * np.pi * g
return x0, g, a, c
def Antibunching(alpha, c, tau, t0):
"""Antibunching. g(2) accounting for Poissonian background."""
return lambda t: c*(1-alpha*np.exp(-(t-t0)/tau))
setattr(Antibunching, 'formula', r'$g(\alpha,c,\tau,t_0;t)=c(1 - \alpha \exp(-(t-t_0)/\tau))$')
def FCSTranslationRotation(alpha, tau_r, tau_t, N):
"""Fluorescence Correlation Spectroscopy. g(2) accounting for translational and rotational diffusion."""
return lambda t: (1 + alpha*np.exp(-t/tau_r) ) / (N * (1 + t/tau_t) )
setattr(FCSTranslationRotation, 'formula', r'$g(\alpha,\tau_R,\tau_T,N;t)=\frac{1 + \alpha \exp(-t/\tau_R)}{N (1 + t/\tau_T)}$')
def FCSTranslation(tau, N):
"""Fluorescence Correlation Spectroscopy. g(2) accounting for translational diffusion."""
return lambda t: 1. / (N * (1 + t/tau) )
setattr(FCSTranslation, 'formula', r'$g(\tau,N;t)=\frac{1}{N (1 + t/\tau)}$')
def SumOverFunctions( functions ):
"""Creates a factory that returns a function representing the sum over 'functions'.
'functions' is a list of functions.
The resulting factory takes as arguments the parameters to all functions,
flattened and in the same order as in 'functions'."""
def function_factory(*args):
def f(x):
y = np.zeros(x.shape)
i = 0
for func in functions:
n = func.func_code.co_argcount
y += func(*args[i,i+n])(x)
i += n
return f
return function_factory
def brot_transitions_upper(B, D, E, phase):
return lambda theta: 3./2. * B**2/D * np.sin(theta + phase)**2 + ( B**2 * np.cos(theta + phase)**2 + (E + B**2/(2*D) * np.sin(theta+phase)**2)**2)**0.5 + D
def brot_transitions_lower(B, D, E, phase):
return lambda theta: 3./2. * B**2/D * np.sin(theta + phase)**2 - ( B**2 * np.cos(theta + phase)**2 + (E + B**2/(2*D) * np.sin(theta+phase)**2)**2)**0.5 + D
#################################################################
# convenience functions for performing some frequently used fits
#################################################################
from scipy.signal import find_peaks_cwt
def find_peaks(x, y, width, n_peaks=-1, baseline_bins=None, estimator='wavelet', peak_shape='Lorentzian'):
"""
Find peaks in a noisy 1D data set by applying continuous wavelet transform
with an expected peak width and fit the determined peaks with lorentzians.
The function determines automatically whether the peaks are positive or negative,
however all peaks are expected to have the same sign.
The number of peaks can be limited by specifying n_peaks > 0. In this case, the 'n_peaks'
peaks with largest amplitudes are taken.
By default, the peaks are subsequently least-square-fitted with a multi-lorentzian function.
This step can be omitted by specifying 'peak_shape'=None.
"""
# estimate the baseline
y0 = baseline(y, baseline_bins)
# determine whether extrema are positive or negative by checking the distance of the absolute maximum and absolute minimum w.r.t the baseline
if np.abs(y.max()-y0) > np.abs(y0-y.min()): # highest maximum larger than smallest minimum
yp = y - y0
sign = 1
else:
yp = y0 - y
sign = -1
if estimator == 'wavelet':
dx = x[1]-x[0]
peak_indices = np.array(find_peaks_cwt(yp, np.array((width/dx,))))
peak_amps = yp[peak_indices]
if n_peaks > 0: # keep only the n_peaks largest
sort_map = peak_amps.argsort()
peak_amps = peak_amps[sort_map][-n_peaks:]
peak_indices = peak_indices[sort_map][-n_peaks:]
else:
raise ValueError('Estimator not implemented')
res = {'x0':x[peak_indices], 'y0':y[peak_indices]}
n = len(peak_indices)
if peak_shape == 'Lorentzian':
hwhm = 0.5*width
p = [0.0]
for i, peak_index in enumerate(peak_indices):
p.append(x[peak_index])
p.append(hwhm)
p.append(peak_amps[i]*np.pi*hwhm)
r = fit_n_peaks(x,yp,p,LorentzianNoOffset)
if not (r[-1] == 0):
p = np.array(r[0])
#delta = np.diag(r[1])**0.5
if sign == -1:
p[0] *= -1
p[3::3] *= -1
p[0] += y0
res['p']=p
return res
def fit_n_peaks(x,y,p,peak_func):
N = (len(p)-1)/3
# chi for N peaks with a common baseline
def chi(p):
yp = p[0]-y
for i in range(N):
yp += peak_func(*p[i*3+1:i*3+4])(x)
return yp
r = leastsq(chi, p, full_output=True)
return r
def find_local_maxima(y,n):
"Returns the indices of the n largest local maxima of y."
half = 0.5*y.max()
mask = y>half
# get left and right edges of connected regions
right_shifted = np.append(False, mask[:-1])
left_shifted = np.append(mask[1:], False)
left_edges = np.where( np.logical_and(mask,np.logical_not(right_shifted) ))[0]
right_edges = np.where( np.logical_and(mask,np.logical_not(left_shifted)) )[0] + 1
if len(left_edges) < n:
raise RuntimeError('did not find enough edges')
indices = []
for k in range(len(left_edges)):
left = left_edges[k]
right = right_edges[k]
indices.append( y[left:right].argmax()+left )
indices = np.array(indices)
maxima = y[indices]
indices = indices[maxima.argsort()][::-1]
return indices[:n]
"""
def fit_rabi(x, y, s):
y_offset=y.mean()
yp = y - y_offset
p = fit(x, yp, CosinusNoOffset, CosinusNoOffsetEstimator)
if p[0] < 0:
p[0] = -p[0]
p[2] = ( ( p[2]/p[1] + 0.5 ) % 1 ) * p[1]
p = fit(x, yp, CosinusNoOffset, p)
p = (p[0], p[1], p[2], y_offset)
result = nonlinear_model(x, y, s, Cosinus, p)
p = result[0]
if p[2]>0.5*p[1]:
while(p[2]>0.5*p[1]):
p[2] -= p[1]
result = nonlinear_model(x, y, s, Cosinus, p)
return result
"""
def fit_rabi(x, y, s):
y_offset=y.mean()
yp = y - y_offset
p = fit(x, yp, CosinusNoOffset, CosinusNoOffsetEstimator)
if p[0] < 0:
p[0] = -p[0]
p[2] = ( ( p[2]/p[1] + 0.5 ) % 1 ) * p[1]
#p = fit(x, yp, CosinusNoOffset, p)
p = (p[0], p[1], y_offset)
return nonlinear_model(x, y, s, Cosinus, p)
def extract_pulses(y):
"""
Extracts pi, pi/2 and 3pi/2 pulses from a Rabi measurement.
Parameters:
y = the arry containing y data
Returns:
f, r, pi, 2pi = arrays containing the indices of the respective pulses and their multiples
"""
# The goal is to find local the rising and falling edges and local minima and maxima.
# First we estimate the 'middle line' by the absolute minimum and maximum.
# Then we cut the data into sections below and above the middle line.
# For every section we compute the minimum, respectively maximum.
# The falling and rising edges mark multiples of pi/2, respectively 3pi/2 pulses.
# center line
m=0.5*(y.max()+y.min())
# boolean array containing positive and negative sections
b = y < m
# indices of rising and falling edges
# rising edges: last point below center line
# falling edges: last point above center line
rising = np.where(b[:-1]&~b[1:])[0]
falling = np.where(b[1:]&~b[:-1])[0]
# local minima and maxima
pi = [ y[:rising[0]].argmin() ]
two_pi = [ y[:falling[0]].argmax() ]
for i in range(1,len(rising)):
pi.append( rising[i-1] + y[rising[i-1]:rising[i]].argmin() )
for i in range(1,len(falling)):
two_pi.append(falling[i-1] + y[falling[i-1]:falling[i]].argmax() )
# For rising edged, we always use the last point below the center line,
# however due to finite sampling and shot noise, sometimes
# the first point above the line may be closer to the actual zero crossing
for i, edge in enumerate(rising):
if y[edge+1]-m < m-y[edge]:
rising[i] += 1
# similarly for the falling edges
for i, edge in enumerate(falling):
if m-y[edge+1] < y[edge]-m:
falling[i] += 1
return np.array(falling), np.array(rising), np.array(pi), np.array(two_pi)
if __name__ == '__main__':
from tools.data_toolbox import load
filename = '2014-11-14_ODMR_08.pys'
d = load(filename)
x = d['frequency']
y = d['counts']
#y0 = baseline(y)
#y = y0 - y
width = 5e5
r = find_peaks(x,y,width,n_peaks=3)
x0 = r['x0']
y0 = r['y0']
p = r['p']
#hwhm = 0.5*width
#p = [0.0]
#for i, xi in enumerate(x0):
# p.append(xi)
# p.append(hwhm)
# p.append(y0[i]*np.pi*hwhm)
#r = fit_n_peaks(x,y,p,LorentzianNoOffset)
#pp = r[0]
import pylab
pylab.figure()
pylab.plot(x,y,'b-')
pylab.plot(x0,y0,'r.')
for i in range(3):
pi = np.append(p[0],p[i*3+1:i*3+4])
pylab.plot(x,Lorentzian(*pi)(x),'r-')
#pylab.plot(x,n_lorentzians(*p)(x),'r.')
#pylab.plot(x,n_lorentzians(*pp)(x),'g.')
pylab.show()
| [
"helmut@fedder.net"
] | helmut@fedder.net |
ff5236cfbc685f7702d63948ddb042f1e8ba1d78 | f8f2536fa873afa43dafe0217faa9134e57c8a1e | /aliyun-python-sdk-multimediaai/aliyunsdkmultimediaai/request/v20190810/RegisterFaceImageRequest.py | fcfb213b95d912b2eaf0be5026e27f0f3fad4814 | [
"Apache-2.0"
] | permissive | Sunnywillow/aliyun-openapi-python-sdk | 40b1b17ca39467e9f8405cb2ca08a85b9befd533 | 6855864a1d46f818d73f5870da0efec2b820baf5 | refs/heads/master | 2022-12-04T02:22:27.550198 | 2020-08-20T04:11:34 | 2020-08-20T04:11:34 | 288,944,896 | 1 | 0 | NOASSERTION | 2020-08-20T08:04:01 | 2020-08-20T08:04:01 | null | UTF-8 | Python | false | false | 1,779 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkmultimediaai.endpoint import endpoint_data
class RegisterFaceImageRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'multimediaai', '2019-08-10', 'RegisterFaceImage')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_FaceGroupId(self):
return self.get_query_params().get('FaceGroupId')
def set_FaceGroupId(self,FaceGroupId):
self.add_query_param('FaceGroupId',FaceGroupId)
def get_FacePersonId(self):
return self.get_query_params().get('FacePersonId')
def set_FacePersonId(self,FacePersonId):
self.add_query_param('FacePersonId',FacePersonId)
def get_ImageUrl(self):
return self.get_query_params().get('ImageUrl')
def set_ImageUrl(self,ImageUrl):
self.add_query_param('ImageUrl',ImageUrl) | [
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
440c195bd9d66a70de8b1abe612b540ee18ab1dc | 4e7784abf3b2cddd98c1c1f092f576dcab04f74c | /scripts/fbx_importer/fbx_helper.py | 39ad90663cac888e7ef286d5a052d4d945b5475e | [
"MIT"
] | permissive | tm8r/MayaFBXImporter | ad6f478dd1e0697699df7a7b0a83b0b9b78c4819 | f349564d2ec0c09c17253a6a9fd4958977e7b999 | refs/heads/master | 2022-05-18T01:24:19.502698 | 2022-05-02T12:07:27 | 2022-05-02T12:07:27 | 209,183,153 | 4 | 2 | MIT | 2022-05-02T12:07:28 | 2019-09-18T00:39:51 | Python | UTF-8 | Python | false | false | 1,152 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from .vendor.Qt import QtWidgets
from .libs.maya import fbx
from .libs.maya import namespace
from . import history_helper
def import_fbx(path, import_mode, parent):
"""import fbx
Args:
path (unicode): path
import_mode (.libs.maya.fbx.FBXImportMode): import mode
parent (QtWidgets.QWidget): parent
"""
namespaces = namespace.get_namespaces(return_separator=True, return_root=True)
if len(namespaces) == 1:
fbx.import_fbx(path, import_mode, namespaces[0])
history_helper.add_recent_file(path)
return
ns, confirmed = QtWidgets.QInputDialog.getItem(parent,
"Select Namespace",
"Namespace",
namespaces,
0,
False)
if not confirmed:
return
fbx.import_fbx(path, import_mode, ns)
history_helper.add_recent_file(path)
| [
"tm8r.nrm@gmail.com"
] | tm8r.nrm@gmail.com |
20bf8ced91a04164ec1fb736de04da27ad9c1360 | 5c79ed194ae04f2b3154c3daba77197b57e900e9 | /password_generator.py | 3cf50310740a86a5ac1e2a21a0532f94f10a4d4d | [
"Apache-2.0"
] | permissive | abrahamanderson19972020/Password-GUI-Manager | 14b6579e8b9742bac9d04990805162300649480e | bf6516065af9918d7a2fd08128222d30f2a2d436 | refs/heads/main | 2023-07-20T05:33:33.064181 | 2021-08-15T18:16:57 | 2021-08-15T18:16:57 | 396,293,499 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 836 | py | #Password Generator Project
import random
def password_maker():
letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
numbers = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
symbols = ['!', '#', '$', '%', '&', '(', ')', '*', '+']
letter_list = [random.choice(letters) for i in range(random.randint(10, 12))]
number_list = [random.choice(numbers) for i in range(random.randint(3, 5))]
symbol_list = [random.choice(symbols) for i in range(random.randint(3, 5))]
passport_list = letter_list + number_list + symbol_list
random.shuffle(passport_list)
return "".join(passport_list)
| [
"abraham.anderson83@gmail.com"
] | abraham.anderson83@gmail.com |
a4219b235f2b02423f9e0f1c6bb9b429308d9880 | 8c2d92c3be91dfe97dcae8a09c2a95de8f58cd41 | /mm131zidong.py | f594d0f3bdf70d106cdba8331a2578c89a8d1386 | [] | no_license | SeemNobody/mm131-linux | 8ff442f1f73dd470fcd7065252a7e78a3a591d16 | fb61e9cba7f92a32feff3a43f9f18b0db4ba394e | refs/heads/master | 2022-09-21T07:15:00.134804 | 2018-01-04T10:02:00 | 2018-01-04T10:02:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,889 | py | #!/usr/bin/env python
# coding=utf-8
import urllib.request
from bs4 import BeautifulSoup
import os
import shutil
import urllib.error
import pymysql
import re
# Root directory under which one sub-directory per gallery is created.
_PIC_ROOT = r"/home/hj/python/pic/mm131/"

# Browser-like headers expected by the image host. Note:
#  * "Host" is intentionally NOT set here: the original code pinned it to
#    img1.mm131.me, which breaks whenever the <img src> points at a
#    different host; urllib derives the correct Host from the URL itself.
#  * "Accept-Encoding: gzip,deflate" is intentionally NOT sent: the raw
#    response body is written straight to a .jpg file, so a gzip-compressed
#    response would produce a corrupt image.
_IMG_HEADERS = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
    "Accept-Language": "zh-CN,zh;q=0.9",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.25 Safari/537.36",
    "Cookie": "bdshare_firstime=1514538484412; UM_distinctid=160a187435124-03f316a3ae33c8-5d4e231d-144000-160a1874352a87; CNZZDATA3866066=cnzz_eid%3D935699110-1494676185-https%253A%252F%252Fwww.baidu.com%252F%26ntime%3D1494676185; Hm_lvt_9a737a8572f89206db6e9c301695b55a=1514538490,1514565948; Hm_lpvt_9a737a8572f89206db6e9c301695b55a=1514567510",
    "Connection": "keep-alive",
}


def _log_failed_url(url):
    """Insert *url* into the mm131 retry table so a failed gallery can be
    re-attempted later. The connection is always closed, even if the
    insert itself fails.
    """
    conn = pymysql.connect(host='192.168.1.101', user='root', passwd='123456',
                           db='mypydb', charset='utf8')
    try:
        cur = conn.cursor()
        cur.execute("insert into mm131(url) values(%s)", url)
        conn.commit()
        cur.close()
    finally:
        conn.close()
    print('未下载网址已存入数据库')


def _save_page_image(page_url, save_path):
    """Fetch the gallery page at *page_url*, locate the photo inside the
    ``div.content-pic`` container and write its bytes to *save_path*.

    Raises urllib.error.URLError on any network failure.
    """
    html = urllib.request.urlopen(urllib.request.Request(page_url))
    picurl = BeautifulSoup(html, 'lxml').find(
        "div", {"class": "content-pic"}).find("img")["src"]
    req = urllib.request.Request(picurl)
    for key, value in _IMG_HEADERS.items():
        req.add_header(key, value)
    # The image CDN rejects requests whose Referer is not an mm131.com page
    # (anti-hotlinking); use the page the image actually belongs to instead
    # of the hard-coded gallery the original code sent.
    req.add_header("Referer", page_url)
    img = urllib.request.urlopen(req).read()
    with open(save_path, "wb") as f:
        f.write(img)


def xiazai_mm131(url):
    """Download every image of the mm131 gallery whose first page is *url*.

    Images are saved as 1.jpg .. N.jpg in a directory named after the page
    title plus the page count. If that directory already exists the gallery
    is assumed to be downloaded and the function returns immediately.
    Any page whose download fails is recorded in MySQL for a later retry.
    """
    # Fetch the first page ONCE and derive both the title and the page
    # count from it (the original code fetched the same URL three times).
    soup = BeautifulSoup(urllib.request.urlopen(url).read(), 'lxml')
    title = soup.find("title").get_text()[:-19]  # strip the site suffix
    for bad in (':', '?', '"'):                  # characters illegal in paths
        title = title.replace(bad, '')
    page_text = soup.find("span", {"class": "page-ch"}).get_text()
    print(page_text)
    # findall(r'\d*') on e.g. "共52页" yields ['', '52', ...]; index 1 is
    # the page count.
    page = re.compile(r'\d*').findall(page_text)[1]

    target_dir = _PIC_ROOT + title + page
    try:
        os.makedirs(target_dir)
    except OSError:
        # Directory already exists -> gallery was downloaded before.
        return

    for i in range(1, int(page) + 1):
        # Page 1 is the original URL; page i is "<url minus .html>_<i>.html".
        page_url = url if i == 1 else url[:-5] + '_' + str(i) + '.html'
        try:
            _save_page_image(page_url, target_dir + "/" + str(i) + ".jpg")
        except urllib.error.URLError as e:
            if hasattr(e, "code"):
                print(e.code)
            if hasattr(e, "reason"):
                print(e.reason)
            # Record the gallery exactly once per failed page (the original
            # inserted it twice when the error carried both attributes).
            _log_failed_url(url)
def _mm131m_log_failure(gallery_url):
    """Record a gallery URL whose download failed into table mm131m for a later retry.

    NOTE(review): credentials/host are hard-coded (192.168.1.101 here, 127.0.0.1 in
    __main__) — presumably intentional for two machines, confirm.
    """
    conn = pymysql.connect(host='192.168.1.101', user='root', passwd='123456',
                           db='mypydb', charset='utf8')
    try:
        cur = conn.cursor()
        # Parameterized insert; pymysql accepts a single scalar argument.
        cur.execute("insert into mm131m(url) values(%s)", gallery_url)
        conn.commit()
        cur.close()
    finally:
        # Always release the connection, even if the insert itself fails.
        conn.close()
    print('未下载网址已存入数据库')


def _mm131m_fetch_image(page_url, save_path):
    """Download the single photo embedded in *page_url* and write it to *save_path*.

    Propagates urllib.error.URLError (incl. HTTPError) on network failure so the
    caller can decide how to record it.
    """
    html = urllib.request.urlopen(urllib.request.Request(page_url))
    picurl = BeautifulSoup(html, 'lxml').find("div", {"class": "content-pic"}).find("img")["src"]
    req = urllib.request.Request(picurl)
    # The image host rejects requests without browser-like headers and a valid
    # Referer (hotlink protection), so mimic a real Chrome session.
    # NOTE(review): Accept-Encoding advertises gzip but the response is written
    # raw; works because the host does not gzip JPEGs — confirm.
    headers = (
        ("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8"),
        ("Accept-Encoding", "gzip,deflate"),
        ("Accept-Language", "zh-CN,zh;q=0.9"),
        ("User-Agent", "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.25 Safari/537.36"),
        ("Cookie", "bdshare_firstime=1514538484412; UM_distinctid=160a187435124-03f316a3ae33c8-5d4e231d-144000-160a1874352a87; CNZZDATA3866066=cnzz_eid%3D935699110-1494676185-https%253A%252F%252Fwww.baidu.com%252F%26ntime%3D1494676185; Hm_lvt_9a737a8572f89206db6e9c301695b55a=1514538490,1514565948; Hm_lpvt_9a737a8572f89206db6e9c301695b55a=1514567510"),
        ("Referer", "http://www.mm131.com/xinggan/3561.html"),
        ("Connection", "keep-alive"),
        ("Host", "img1.mm131.me"),
    )
    for name, value in headers:
        req.add_header(name, value)
    img = urllib.request.urlopen(req).read()
    # Context manager guarantees the file handle is closed (the original leaked
    # it on a write error).
    with open(save_path, "wb") as f:
        f.write(img)


def xiazai_mm131_sql(url):
    """Retry-download one mm131 photo gallery identified by its first-page *url*.

    Saves every page's image as <n>.jpg under /home/hj/python/pic/mm131/<title><pages>/.
    Any page that fails with a URLError causes the gallery URL to be logged into
    table mm131m (once per failure — the original code inserted it twice when the
    error exposed both .code and .reason).
    """
    html = urllib.request.urlopen(url)
    title = BeautifulSoup(html, 'lxml').find("title").get_text()
    # Drop the fixed 19-character site suffix from the page title, then strip
    # characters that are awkward in directory names.
    title = title[:-19]
    for bad in (':', '?', '"'):
        title = title.replace(bad, '')
    html = urllib.request.urlopen(url)
    page = BeautifulSoup(html, 'lxml').find("span", {"class": "page-ch"}).get_text()
    # The span text contains the page count; the digits are the second match of \d*.
    page = re.compile(r'\d*').findall(page)[1]
    save_dir = r"/home/hj/python/pic/mm131/" + title + page
    try:
        os.makedirs(save_dir)
    except OSError:
        # Directory left over from an earlier run: wipe it and start clean.
        # (Narrowed from a bare except, which also swallowed KeyboardInterrupt.)
        shutil.rmtree(save_dir)
        os.makedirs(save_dir)
    for i in range(1, int(page) + 1):
        # Page 1 is the gallery URL itself; page N is <base>_N.html.
        page_url = url if i == 1 else url[:-5] + '_' + str(i) + '.html'
        try:
            _mm131m_fetch_image(page_url, os.path.join(save_dir, str(i) + '.jpg'))
        except urllib.error.URLError as e:
            if hasattr(e, "code"):
                print(e.code)
            if hasattr(e, "reason"):
                print(e.reason)
            _mm131m_log_failure(url)
if __name__ == '__main__':
    # Pass 1: crawl listing pages 22..121 of the "xinggan" category and download
    # every gallery linked from each listing page.
    for i in range(22, 122):
        print("第" + str(i) + "页")
        url = 'http://www.mm131.com/xinggan/list_6_' + str(i) + '.html'
        html = urllib.request.urlopen(url).read()
        urls = BeautifulSoup(html, 'lxml').find('dl', {'class': 'list-left public-box'}).findAll('a', {'target': '_blank'})
        print(urls)
        for url in urls:
            url = url['href']
            print(url)
            xiazai_mm131(url)

    # Pass 2: galleries that failed during pass 1 were logged into table mm131;
    # load them (deduplicated) and retry each one.
    urls = []
    conn = pymysql.connect(host='127.0.0.1', user='root', passwd='123456',
                           db='mypydb', charset='utf8')
    cur = conn.cursor()
    cur.execute("select url from mm131")
    results = cur.fetchall()
    cur.close()
    conn.close()
    for row in results:
        # Each row is a 1-tuple (url,).
        urls.append(row[0])
    urls = list(set(urls))

    # Drain the retry queue. xiazai_mm131_sql logs its own failures into table
    # mm131m, so after each retry we collect those, clear the table, and feed
    # them back into the queue.
    while urls:
        url = urls.pop()
        print("重新下载:%s" % url)
        xiazai_mm131_sql(url)
        try:
            conn = pymysql.connect(host='127.0.0.1', user='root', passwd='123456',
                                   db='mypydb', charset='utf8')
            cur = conn.cursor()
            cur.execute("select url from mm131m")
            results = cur.fetchall()
            cur.execute("truncate mm131m")
            cur.close()
            conn.close()
            for row in results:
                urls.append(row[0])
            urls = list(set(urls))
        except Exception:
            # Best effort: if the bookkeeping DB is briefly unreachable, keep
            # draining the current queue rather than aborting the whole run.
            # (Narrowed from a bare except, which also hid KeyboardInterrupt.)
            pass
"834775954@qq.com"
] | 834775954@qq.com |
3f4d8e5bb5ac53e3e70d2d2c21110925d8afa471 | 3a2d89bc4d8d5385f835a64ef6694604b4aa063c | /rockpaperscissors.py | d7194b8c8ce81a0ddc7c64ccf771c97cc64d2709 | [] | no_license | dancingdogs/myprojects1 | c4045c3e4132552adea2e4031c4a83844d3f73ac | f04edcc3643bbf3c751865e9ef07f7c222b09e16 | refs/heads/master | 2022-12-13T05:48:45.496393 | 2020-09-07T12:14:50 | 2020-09-07T12:14:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,035 | py | from random import randint
#create list of play options
t = ["Rock","Paper","Scissors"]
#assign a random play to the computer
computer = t[randint(0,2)]
#set player to False
player=False
while player==False:
#set player to True
player=input("Rock, Paper, Scissors?")
if player == computer:
print("Tie!")
elif player == "Rock":
if computer == "Paper":
print("You lose!",computer,"covers",player)
else:
print("You win!",player,"smashes",computer)
elif player == "Paper":
if computer == "Scissors":
print("You lose!",computer,"cut",player)
else:
print("You win!",player,"covers",computer)
elif player == "Scissors":
if computer == "Rock":
print("You lose!",computer,"smashes",player)
else:
print("You win!",player,"cut",computer)
else:
print("That is not a valid play. Check your spelling!")
player = False
computer=t[randint(0,2)] | [
"noreply@github.com"
] | noreply@github.com |
29c051ba4037637ba19565b0c61472f380db748c | c03869d82865e0d10c9f34d53563a3b0a49b5b0a | /env_file.py | f117aef8cf91c098d397640bbd126d81d689522b | [] | no_license | Azimkhan/env_file | 9385b8724e346ed0697e387eedb504cc27f921c5 | e865ba630ad6f745518dd65877f28b0ceb2d3e9d | refs/heads/master | 2016-09-01T08:32:32.025926 | 2016-03-17T17:03:13 | 2016-03-17T17:03:13 | 54,100,649 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 611 | py | import os
def setup_env_from_file(file_name='.env', file_dir=None):
if file_dir is None:
file_dir = os.getcwd()
file_path = os.path.join(file_dir, file_name)
if not os.path.exists(file_path):
return
with open(file_path, 'r') as f:
for line in f.readlines():
x = line.strip()
if not x:
continue
i = x.find('=')
if i > 0:
param = x[:i].strip()
val = x[i+1:].strip()
else:
param = x
val = ''
os.environ[param] = val
| [
"me@azimkhan.net"
] | me@azimkhan.net |
0099ea1a24cd0a7e27e7caa9bcd30ad25bb5fc29 | d4b91d9ebb7c850f07b06e5c15794b2885f2e767 | /6/Tema3(Циклы)/6.c_6.py | 3d423e792c84c79c5c729e0ca3d5be2f25693867 | [] | no_license | Timur597/First6team | 13b6dbb2d2e68d5df5c76c5bbba587d563a95957 | 4df85a6f20bad626ad76196cd5bc867ce27d0aac | refs/heads/master | 2023-03-05T15:39:24.311784 | 2021-02-20T07:17:36 | 2021-02-20T07:17:36 | 340,588,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 270 | py | 6 Задание
names = ('Максат','Лязат','Данияр','Айбек','Атай','Салават','Адинай','Жоомарт','Алымбек','Эрмек','Дастан','Бекмамат','Аслан')
i = 0
while i < 12:
print (names [i])
i = i + 2
| [
"khalilov.timur97@mail.ru"
] | khalilov.timur97@mail.ru |
73acba9528101c1bfa9187c8776c8d7234afbc3f | c6fca34b2c9cb973d9d65d23e58e40d4513e173a | /aoc2015/day18.py | 65008c1bad113a40d1876343cbf348d6f612d6a1 | [] | no_license | tomkooij/AdventOfCode | 8ff47c027c887194b0d441f61a8db172c4e260ea | 7890d45a01498dcb48972a7e311888ce6f003bd2 | refs/heads/master | 2021-08-15T19:46:21.869137 | 2021-01-18T06:37:50 | 2021-01-18T06:37:50 | 48,421,868 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,538 | py | # adventofcode.com
# day18
from copy import deepcopy
INPUT = ('input/input18', 100)
TESTCASE = ('input/test18', 4)
ON = '#'
OFF = '.'
def pretty_print(lights):
for l in lights:
print ''.join(l).rstrip('\n')
def count(lights):
return sum([l.count('#') for l in lights])
def get_neighbours(lights, x, y):
neighbours = []
xmax = ymax = len(lights)
for i in range(max(y-1, 0), min(y+2, ymax)):
for j in range(max(x-1,0), min(x+2, xmax)):
neighbours.append((i,j))
if (y,x) in neighbours:
neighbours.remove((y,x))
return neighbours
def count_neighbours(lights, x, y):
n = get_neighbours(lights, x, y)
return count([lights[y][x] for y,x in n])
FILENAME, STEPS = INPUT
if __name__ == '__main__':
with open(FILENAME) as f:
lights = map(list, f.read().splitlines())
for _ in range(STEPS+1):
old_lights = deepcopy(lights)
pretty_print(lights)
print count(lights)
for y in range(0, len(lights)):
for x in range(0, len(lights)):
#print y, x, count_neighbours(lights, x, y)
if old_lights[y][x] == ON:
if not count_neighbours(old_lights, x, y) in [2, 3]:
lights[y][x] = OFF
elif old_lights[y][x] == OFF:
if count_neighbours(old_lights, x, y) == 3:
lights[y][x] = ON
else:
assert False, 'lp0 on fire! %d %d %c' % (x, y, lights[y][x])
| [
"tomkooij@tomkooij.nl"
] | tomkooij@tomkooij.nl |
1656940dd6607f773281e142235fe11ba06d5b00 | 14b190bf799bbbacda22cea27cb4689be46b46de | /jobs/migrations/0001_initial.py | 5e903d40218b262e84948630dfe1ea2a7e7da231 | [] | no_license | WaliEEE/portfolio-waleee | e3516e1f369905dd44787cae5bcbae6547a581b6 | 314ba08c4fa6a8136bbe5490617cda2058bf221d | refs/heads/master | 2022-12-05T01:14:43.648681 | 2020-09-02T17:05:04 | 2020-09-02T17:05:04 | 292,340,994 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | # Generated by Django 3.1 on 2020-08-27 15:56
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Job',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(upload_to='images/')),
('summary', models.CharField(max_length=200)),
],
),
]
| [
"jocularriad2@gmail.com"
] | jocularriad2@gmail.com |
1d87192e81d61530ae36b21063abb510bd089aee | fbaf44a5f4effe2838a03165f237a7a282284f64 | /Practice/PIle_length-width_soilE/1.1 readODB.py | 3f59145257606d79712227f140d6214a9b44a5d9 | [] | no_license | WangDooo/Python-in-Abaqus | b568f5499bbfd8bc4893f4510a233b9c0be30cf8 | c7bcbd1adc3bcff9661e13c8ce883cb59269ceb8 | refs/heads/master | 2021-06-13T14:05:25.639543 | 2021-03-24T03:32:44 | 2021-03-24T03:32:44 | 173,902,521 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | # coding:utf8
from odbAccess import *
odb = openOdb(path='Job-1.odb')
step = odb.steps['Step-1']
point = odb.rootAssembly.nodeSets['SET-PILETOPPOINT']
lastFrame = step.frames[-1]
u = lastFrame.fieldOutputs['U']
u_point = u.getSubset(region=point)
uFile = open('U2.csv','w')
uFile.write('nodeLabel,U2 \n')
for uValue in u_point.values:
uFile.write('NO.%s, %f \n' % (uValue.nodeLabel, uValue.data[1])) | [
"wangbc1993@163.com"
] | wangbc1993@163.com |
8368d9e18fd02f573ea870d4d0cdbff40237d735 | c23c092689ba634bb1037aaa0e058c8edec3407a | /Introduction to Machine Learning/Code Camp/Exercise2/LogisticR.py | e250a4d7b7813ac677020e5dc4a19caa81c3b202 | [] | no_license | PemYanZen/DataScience_2019501129 | 3911d2204159eeaa84350764f183c550ae4c938d | f42cdacb6e2e8942df79b658594b64c275fabe11 | refs/heads/master | 2023-06-01T09:26:54.155414 | 2021-06-22T03:11:26 | 2021-06-22T03:11:26 | 295,600,844 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,244 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 29 11:59:12 2020
@author: pemayangdon
"""
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn.impute import KNNImputer
from sklearn.linear_model import Ridge
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error as mserr
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn import svm
from sklearn.svm import SVC
from sklearn.metrics import classification_report
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
from sklearn.neural_network import MLPClassifier
#read train and test datasets into pandas DataFrames trainx_df, trainy_df,testx_df
def readDataSets(train_path, test_path,predict_col,index_col=None):
if index_col==None:
trainx_df=pd.read_csv(train_path)
trainy_df=trainx_df[predict_col]
trainx_df.drop(predict_col,axis=1,inplace=True)
testx_df=pd.read_csv(test_path)
else:
trainx_df=pd.read_csv(train_path,index_col='Id')
trainy_df=trainx_df[predict_col]
trainx_df.drop(predict_col,axis=1,inplace=True)
testx_df=pd.read_csv(test_path,index_col='Id')
return trainx_df,trainy_df,testx_df
# As a first step of pre-processing remove columns with null value ratio greater than provided limit
def dropFeturesWithNullValuesGreaterThanALimit(trainx_df, testx_df,null_ratio=0.3):
sample_size=len(trainx_df)
columns_with_null_values=[[col,float(trainx_df[col].isnull().sum())/float(sample_size)] for col in trainx_df.columns if trainx_df[col].isnull().sum()]
columns_to_drop=[x for (x,y) in columns_with_null_values if y>null_ratio]
trainx_df.drop(columns_to_drop,axis=1,inplace=True)
testx_df.drop(columns_to_drop,axis=1,inplace=True)
return trainx_df,testx_df
# As a second pre-processing step find all categorical columns and one hot encode them. Before one hot encode fill all null values with dummy in those columns. Some categorical columns in trainx_df may not have null values in trainx_df but have null values in testx_df. To overcome this problem we will add a row to the trainx_df with all dummy values for categorical values. Once one hot encoding is complete drop the added dummy column
def oneHotEncode(trainx_df,testx_df):
categorical_columns=[col for col in trainx_df.columns if trainx_df[col].dtype==object]
ordinal_columns=[col for col in trainx_df.columns if col not in categorical_columns]
dummy_row=list()
for col in trainx_df.columns:
if col in categorical_columns:
dummy_row.append("dummy")
else:
dummy_row.append("")
new_row=pd.DataFrame([dummy_row],columns=trainx_df.columns)
trainx_df=pd.concat([trainx_df,new_row],axis=0, ignore_index=True)
testx_df=pd.concat([testx_df],axis=0,ignore_index=True)
for col in categorical_columns:
trainx_df[col].fillna(value="dummy",inplace=True)
testx_df[col].fillna(value="dummy",inplace=True)
enc = OneHotEncoder(drop='first',sparse=False)
enc.fit(trainx_df[categorical_columns])
trainx_enc=pd.DataFrame(enc.transform(trainx_df[categorical_columns]))
testx_enc=pd.DataFrame(enc.transform(testx_df[categorical_columns]))
trainx_enc.columns=enc.get_feature_names(categorical_columns)
testx_enc.columns=enc.get_feature_names(categorical_columns)
trainx_df=pd.concat([trainx_df[ordinal_columns],trainx_enc],axis=1,ignore_index=True)
testx_df=pd.concat([testx_df[ordinal_columns],testx_enc],axis=1,ignore_index=True)
trainx_df.drop(trainx_df.tail(1).index,inplace=True)
return trainx_df,testx_df
# As a third step of pre-processing fill all missing values for ordinal features
def fillMissingValues(trainx_df,testx_df):
imputer = KNNImputer(n_neighbors=2)
imputer.fit(trainx_df)
trainx_df_filled = imputer.transform(trainx_df)
trainx_df_filled=pd.DataFrame(trainx_df_filled,columns=trainx_df.columns)
testx_df_filled = imputer.transform(testx_df)
testx_df_filled=pd.DataFrame(testx_df_filled,columns=testx_df.columns)
testx_df_filled.reset_index(drop=True,inplace=True)
return trainx_df_filled,testx_df_filled
# As a fourth step of pre-processing scale all the features either through Standard scores or MinMax scaling
def scaleFeatures(trainx_df,testx_df,scale='Standard'):
if scale == 'Standard':
scaler = preprocessing.StandardScaler().fit(trainx_df)
trainx_df=scaler.transform(trainx_df)
testx_df=scaler.transform(testx_df)
elif scale == 'MinMax':
scaler=preprocessing.MinMaxScaler().fit(trainx_df)
trainx_df=scaler.transform(trainx_df)
testx_df=scaler.transform(testx_df)
return trainx_df,testx_df
#As fifth step of preprocessing apply PCA
def findPrincipalComponents(trainx_df, testx_df):
pca = PCA().fit(trainx_df)
itemindex = np.where(np.cumsum(pca.explained_variance_ratio_)>0.999)
print('np.cumsum(pca.explained_variance_ratio_)', np.cumsum(pca.explained_variance_ratio_))
#Plotting the Cumulative Summation of the Explained Variance
plt.figure(np.cumsum(pca.explained_variance_ratio_)[0])
plt.plot(np.cumsum(pca.explained_variance_ratio_))
plt.xlabel('Number of Components')
plt.ylabel('Variance (%)') #for each component
plt.title('Principal Components Explained Variance')
plt.show()
pca_std = PCA(n_components=itemindex[0][0]).fit(trainx_df)
trainx_df = pca_std.transform(trainx_df)
testx_df = pca_std.transform(testx_df)
return trainx_df,testx_df
#change label 0 and 1
def encodeLabelsToZeroAndOne(trainy_df):
le=preprocessing.LabelEncoder()
trainy_df=le.fit_transform(trainy_df)
return trainy_df
def splitTrainAndTest(trainx_df, trainy_df, split_ratio=0.3):
X_train, X_test, y_train, y_test = train_test_split(trainx_df, trainy_df, test_size=split_ratio, random_state=42)
return X_train, X_test, y_train, y_test
def getLogisticRegressionModel(X_train, y_train, reg_par=0.00001, max_iterations=10000000):
logreg = LogisticRegression(class_weight="balanced", C=reg_par, max_iter=max_iterations)
logreg.fit(X_train, y_train)
return logreg
def getSVClassificationModel(X_train, y_train, reg_par=1.0,deg=3,ker='rbf'):
svcmodel=SVC(C=reg_par,degree=deg,kernel=ker)
svcmodel.fit(X_train,y_train)
return svcmodel
def getScores(model,X_train,X_test,y_train,y_test):
# THRESHOLD = 0.5
# yhat=np.where(model.predict_proba(X_test)[:,1] > THRESHOLD, 1,0)
yprobs= model.predict_log_proba(X_test)
yprobs = yprobs[:,1]
ras = roc_auc_score(y_test, yprobs, average='weighted')
print(ras)
yhat = model.predict(X_test)
TP,TN, FP,FN = 0,0,0,0
for i in range(len(yhat)):
if yhat[i] == 0:
if y_test[i]==0:
TN+=1
else:
FN+=1
else:
if y_test[i] == 1:
TP+=1
else:
FP+=1
print(classification_report(y_test, yhat))
print(classification_report(y_test, yhat, output_dict=True)['1']
['precision'], classification_report(y_test, yhat,output_dict=True)['1']
['recall'])
fpr,tpr,threshold = roc_curve(y_test, yprobs)
roc_auc = auc(fpr,tpr)
plt.title('receiver operating char')
plt.plot(fpr, tpr, 'b', label='AUC=%0.2f' %roc_auc)
plt.legend(loc='lower right')
plt.plot([0, 1],[0, 1],'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('true positive rate')
plt.xlabel('false positive rate')
plt.show()
return ([TP, TN,FP,FN,TP/(TP+FN),TN/(TN+FP)])
return ([TP,TN,FP,FN,TP/(TP+FN), TN/(TN+FP)])
def predictTestx(Model, testx_df):
testpred = pd.DataFrame(Model.predict(testx_df))
testpred.to_csv('testpred.csv')
# fpr, tpr, threshold = roc_curve(y_test, yprobs)
# roc_auc = auc(fpr, tpr)
# plt.title('Receiver Operating Charateristic')
trainx_df,trainy_df,testx_df=readDataSets("/Users/pemayangdon/DataScience_2019501129/Introduction to Machine Learning/CodeCamp/Exercise2/marketing_training.csv",
"/Users/pemayangdon/DataScience_2019501129/Introduction to Machine Learning/CodeCamp/Exercise2/marketing_test.csv",
predict_col='responded')
trainx_df,testx_df=dropFeturesWithNullValuesGreaterThanALimit(trainx_df, testx_df,null_ratio=0.5)
trainx_df,testx_df=oneHotEncode(trainx_df,testx_df)
trainx_df,testx_df=fillMissingValues(trainx_df,testx_df)
trainx_df,testx_df=scaleFeatures(trainx_df,testx_df,scale='Standard')
trainy_df = encodeLabelsToZeroAndOne(trainy_df)
trainx_df,testx_df=findPrincipalComponents(trainx_df, testx_df)
X_train, X_test, y_train, y_test=splitTrainAndTest(trainx_df, trainy_df,split_ratio=0.3)
LogRegModel = getLogisticRegressionModel(X_train, y_train)
getScores(LogRegModel, X_train, X_test, y_train, y_test)
svcmodel = getSVClassificationModel(X_train, y_train, reg_par=0.5,deg=2,ker='poly')
| [
"pemayangdon@pemyanzen.local"
] | pemayangdon@pemyanzen.local |
15b2c5b66c9f84f6f1b8a358fac9053bb30b304a | c029647c1f3d06ac13fd519bddf8e25fc4e1b319 | /carla_rllib/carla_rllib-prak_evaluator-carla_rllib-prak_evaluator/carla_rllib/prak_evaluator/srunner/scenarioconfigs/scenarios/control_loss.py | 85d72b705a5f523073f8883e5a942dd9128e5a65 | [
"MIT"
] | permissive | TinaMenke/Deep-Reinforcement-Learning | 70f56eff4e9d54d70ea0eca6ca70e880fbdd74fb | 8ab0894b92e1f994802a218002021ee075c405bf | refs/heads/master | 2022-12-09T20:30:30.176090 | 2020-03-21T16:39:47 | 2020-03-21T16:39:47 | 249,012,679 | 9 | 2 | MIT | 2022-12-08T07:26:06 | 2020-03-21T16:12:14 | Python | UTF-8 | Python | false | false | 9,003 | py | #!/usr/bin/env python
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""
Control Loss Vehicle scenario:
The scenario realizes that the vehicle looses control due to
bad road conditions, etc. and checks to see if the vehicle
regains control and corrects it's course.
"""
import random
import py_trees
from srunner.scenariomanager.scenarioatomics.atomic_behaviors import *
from srunner.scenariomanager.scenarioatomics.atomic_criteria import CollisionTest
from srunner.scenariomanager.scenarioatomics.atomic_trigger_conditions import *
from srunner.scenarios.basic_scenario import BasicScenario
from srunner.tools.scenario_helper import *
CONTROL_LOSS_SCENARIOS = [
"ControlLoss"
]
class ControlLoss(BasicScenario):
"""
Implementation of "Control Loss Vehicle" (Traffic Scenario 01)
This is a single ego vehicle scenario
"""
category = "ControlLoss"
def __init__(self, world, ego_vehicles, config, randomize=False, debug_mode=False, criteria_enable=True,
timeout=60):
"""
Setup all relevant parameters and create scenario
"""
# ego vehicle parameters
self._no_of_jitter = 10
self._noise_mean = 0 # Mean value of steering noise
self._noise_std = 0.01 # Std. deviation of steering noise
self._dynamic_mean_for_steer = 0.001
self._dynamic_mean_for_throttle = 0.045
self._abort_distance_to_intersection = 10
self._current_steer_noise = [0] # This is a list, since lists are mutable
self._current_throttle_noise = [0]
self._start_distance = 20
self._trigger_dist = 2
self._end_distance = 30
self._ego_vehicle_max_steer = 0.0
self._ego_vehicle_max_throttle = 1.0
self._ego_vehicle_target_velocity = 15
self._map = CarlaDataProvider.get_map()
# Timeout of scenario in seconds
self.timeout = timeout
# The reference trigger for the control loss
self._reference_waypoint = self._map.get_waypoint(config.trigger_points[0].location)
self.loc_list = []
self.obj = []
super(ControlLoss, self).__init__("ControlLoss",
ego_vehicles,
config,
world,
debug_mode,
criteria_enable=criteria_enable)
def _initialize_actors(self, config):
"""
Custom initialization
"""
self._distance = random.sample(range(10, 80), 3)
self._distance = sorted(self._distance)
first_loc, _ = get_location_in_distance_from_wp(self._reference_waypoint, self._distance[0])
second_loc, _ = get_location_in_distance_from_wp(self._reference_waypoint, self._distance[1])
third_loc, _ = get_location_in_distance_from_wp(self._reference_waypoint, self._distance[2])
self.loc_list.extend([first_loc, second_loc, third_loc])
self._dist_prop = [x - 2 for x in self._distance]
self.first_loc_prev, _ = get_location_in_distance_from_wp(self._reference_waypoint, self._dist_prop[0])
self.sec_loc_prev, _ = get_location_in_distance_from_wp(self._reference_waypoint, self._dist_prop[1])
self.third_loc_prev, _ = get_location_in_distance_from_wp(self._reference_waypoint, self._dist_prop[2])
self.first_transform = carla.Transform(self.first_loc_prev)
self.sec_transform = carla.Transform(self.sec_loc_prev)
self.third_transform = carla.Transform(self.third_loc_prev)
self.first_transform = carla.Transform(carla.Location(self.first_loc_prev.x,
self.first_loc_prev.y,
self.first_loc_prev.z))
self.sec_transform = carla.Transform(carla.Location(self.sec_loc_prev.x,
self.sec_loc_prev.y,
self.sec_loc_prev.z))
self.third_transform = carla.Transform(carla.Location(self.third_loc_prev.x,
self.third_loc_prev.y,
self.third_loc_prev.z))
first_debris = CarlaActorPool.request_new_actor('static.prop.dirtdebris01', self.first_transform)
second_debris = CarlaActorPool.request_new_actor('static.prop.dirtdebris01', self.sec_transform)
third_debris = CarlaActorPool.request_new_actor('static.prop.dirtdebris01', self.third_transform)
self.obj.extend([first_debris, second_debris, third_debris])
for debris in self.obj:
debris.set_simulate_physics(False)
self.other_actors.append(first_debris)
self.other_actors.append(second_debris)
self.other_actors.append(third_debris)
def _create_behavior(self):
"""
The scenario defined after is a "control loss vehicle" scenario. After
invoking this scenario, it will wait until the vehicle drove a few meters
(_start_distance), and then perform a jitter action. Finally, the vehicle
has to reach a target point (_end_distance). If this does not happen within
60 seconds, a timeout stops the scenario
"""
# start condition
start_end_parallel = py_trees.composites.Parallel("Jitter",
policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ONE)
start_condition = InTriggerDistanceToLocation(self.ego_vehicles[0], self.first_loc_prev, self._trigger_dist)
for _ in range(self._no_of_jitter):
# change the current noise to be applied
turn = ChangeNoiseParameters(self._current_steer_noise, self._current_throttle_noise,
self._noise_mean, self._noise_std, self._dynamic_mean_for_steer,
self._dynamic_mean_for_throttle) # Mean value of steering noise
# Noise end! put again the added noise to zero.
noise_end = ChangeNoiseParameters(self._current_steer_noise, self._current_throttle_noise,
0, 0, 0, 0)
jitter_action = py_trees.composites.Parallel("Jitter",
policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ONE)
# Abort jitter_sequence, if the vehicle is approaching an intersection
jitter_abort = InTriggerDistanceToNextIntersection(self.ego_vehicles[0], self._abort_distance_to_intersection)
# endcondition: Check if vehicle reached waypoint _end_distance from here:
end_condition = DriveDistance(self.ego_vehicles[0], self._end_distance)
start_end_parallel.add_child(start_condition)
start_end_parallel.add_child(end_condition)
# Build behavior tree
sequence = py_trees.composites.Sequence("ControlLoss")
sequence.add_child(ActorTransformSetter(self.other_actors[0], self.first_transform, physics=False))
sequence.add_child(ActorTransformSetter(self.other_actors[1], self.sec_transform, physics=False))
sequence.add_child(ActorTransformSetter(self.other_actors[2], self.third_transform, physics=False))
jitter = py_trees.composites.Sequence("Jitter Behavior")
jitter.add_child(turn)
jitter.add_child(InTriggerDistanceToLocation(self.ego_vehicles[0], self.sec_loc_prev, self._trigger_dist))
jitter.add_child(turn)
jitter.add_child(InTriggerDistanceToLocation(self.ego_vehicles[0], self.third_loc_prev, self._trigger_dist))
jitter.add_child(turn)
jitter_action.add_child(jitter)
jitter_action.add_child(jitter_abort)
sequence.add_child(start_end_parallel)
sequence.add_child(jitter_action)
sequence.add_child(end_condition)
sequence.add_child(noise_end)
return sequence
def _create_test_criteria(self):
"""
A list of all test criteria will be created that is later used
in parallel behavior tree.
"""
criteria = []
collision_criterion = CollisionTest(self.ego_vehicles[0])
criteria.append(collision_criterion)
return criteria
def change_control(self, control):
"""
This is a function that changes the control based on the scenario determination
:param control: a carla vehicle control
:return: a control to be changed by the scenario.
"""
control.steer += self._current_steer_noise[0]
control.throttle += self._current_throttle_noise[0]
return control
def __del__(self):
"""
Remove all actors upon deletion
"""
self.remove_all_actors()
| [
"noreply@github.com"
] | noreply@github.com |
9a378ac66d24667514820bb7ae2934ca7d3f4f35 | e2242f78a129f2b87252a0bf1621e8190fd07442 | /src/compas_vol/microstructures/tpms.py | 6264e6a6389be9c6043785e4474fb65d97fa8cda | [
"MIT"
] | permissive | ilmihur/compas_vol | 751237e00f841f25546accf1bf1db782aa9a4559 | 8aedc611bd96acd95d26b9f34c805a8ff05020bf | refs/heads/master | 2022-11-19T12:21:03.829785 | 2020-07-16T11:22:52 | 2020-07-16T11:22:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,724 | py | from math import pi, sin, cos
from compas import PRECISION
class TPMS(object):
"""A triply periodic minimal surface (TPMS) is defined by a type and a wavelength.
Parameters
----------
tpmstype: String
Type of TPMS. Currently avaliable are Gyroid, SchwartzP, Diamond, Neovius, Lidinoid and FischerKoch.
wavelength: float
The wavelength of the trigonometric function.
Examples
--------
>>> a = TPMS(tpmstype='Gyroid', wavelength=5.0)
"""
def __init__(self, tpmstype=0, wavelength=1.0):
self.tpmstypes = ['gyroid', 'schwartzp', 'diamond', 'neovius', 'lidinoid', 'fischerkoch']
self._tpmstype = None
self.tpmstype = tpmstype
self._wavelength = None
self.wavelength = wavelength
self._factor = self.wavelength/pi
# ==========================================================================
# descriptors
# ==========================================================================
@property
def tpmstype(self):
return self._tpmstype
@tpmstype.setter
def tpmstype(self, tpmstype):
if type(tpmstype) == str:
if tpmstype.lower() in self.tpmstypes:
self._tpmstype = self.tpmstypes.index(tpmstype.lower())
else:
self._tpmstype = 0
elif type(tpmstype) == int:
self._tpmstype = max(0, min(tpmstype, len(self.tpmstypes) - 1))
@property
def wavelength(self):
"""float: The wavelength of the TPMS."""
return self._wavelength
@wavelength.setter
def wavelength(self, wavelength):
self._wavelength = float(wavelength)
self._factor = self.wavelength/pi
def __repr__(self):
return 'TPMS({0},{1:.{2}f})'.format(self.tpmstype, self.wavelength, PRECISION[:1])
# ==========================================================================
# distance function
# ==========================================================================
def get_distance(self, point):
"""
single point distance function
"""
x, y, z = point
px = x/self._factor
py = y/self._factor
pz = z/self._factor
d = 0
if self.tpmstype == 0: # 'Gyroid':
d = sin(px)*cos(py) + sin(py)*cos(pz) + sin(pz)*cos(px)
elif self.tpmstype == 1: # 'SchwartzP':
d = cos(px) + cos(py) + cos(pz)
elif self.tpmstype == 2: # 'Diamond':
d = (
sin(px) * sin(py) * sin(pz) +
sin(px) * cos(py) * cos(pz) +
cos(px) * sin(py) * cos(pz) +
cos(px) * cos(py) * sin(pz)
)
elif self.tpmstype == 3: # 'Neovius':
d = (3 * cos(px) + cos(py) + cos(pz) +
4 * cos(px) * cos(py) * cos(pz))
elif self.tpmstype == 4: # 'Lidinoid':
d = (0.5 * (sin(2*px) * cos(py) * sin(pz) +
sin(2*py) * cos(py) * sin(px) +
sin(2*pz) * cos(px) * sin(pz)) -
0.5 * (cos(2*px) * cos(2*py) +
cos(2*py) * cos(2*pz) +
cos(2*pz) * cos(2*px)) + 0.15)
elif self.tpmstype == 5: # 'FischerKoch':
d = (cos(2*px) * sin(py) * cos(pz) +
cos(2*py) * sin(pz) * cos(px) +
cos(2*pz) * sin(px) * cos(py))
return d
def get_distance_numpy(self, x, y, z):
    """
    Vectorised counterpart of ``get_distance`` for numpy arrays/grids.

    Returns 0 for type indices without an implemented formula (incl. IWP).
    """
    import numpy as np
    u = x / self._factor
    v = y / self._factor
    w = z / self._factor
    t = self.tpmstype
    if t == 0:  # Gyroid
        return np.sin(u) * np.cos(v) + np.sin(v)*np.cos(w) + np.sin(w)*np.cos(u)
    if t == 1:  # SchwartzP
        return np.cos(u) + np.cos(v) + np.cos(w)
    if t == 2:  # Diamond
        return (
            np.sin(u) * np.sin(v) * np.sin(w) +
            np.sin(u) * np.cos(v) * np.cos(w) +
            np.cos(u) * np.sin(v) * np.cos(w) +
            np.cos(u) * np.cos(v) * np.sin(w)
        )
    if t == 3:  # Neovius
        return (3 * np.cos(u) + np.cos(v) + np.cos(w) +
                4 * np.cos(u) * np.cos(v) * np.cos(w))
    if t == 4:  # Lidinoid
        return (0.5 * (np.sin(2*u) * np.cos(v) * np.sin(w) +
                       np.sin(2*v) * np.cos(v) * np.sin(u) +
                       np.sin(2*w) * np.cos(u) * np.sin(w)) -
                0.5 * (np.cos(2*u) * np.cos(2*v) +
                       np.cos(2*v) * np.cos(2*w) +
                       np.cos(2*w) * np.cos(2*u)) + 0.15)
    if t == 5:  # FischerKoch
        return (np.cos(2*u) * np.sin(v) * np.cos(w) +
                np.cos(2*v) * np.sin(w) * np.cos(u) +
                np.cos(2*w) * np.sin(u) * np.cos(v))
    return 0  # unknown type index
if __name__ == "__main__":
    # from compas.geometry import Point
    import numpy as np
    import matplotlib.pyplot as plt

    surface = TPMS(tpmstype='schwartzP', wavelength=5)
    print(surface)
    # Sample the field on a regular grid and show one z-slice.
    x, y, z = np.ogrid[-14:14:112j, -12:12:96j, -10:10:80j]
    field = surface.get_distance_numpy(x, y, z)
    # transpose because numpy indexing is 1)row 2) column instead of x y
    plt.imshow(field[:, :, 25].T, cmap='RdBu')
    plt.colorbar()
    plt.axis('equal')
    plt.show()
| [
"bernhard@arch.ethz.ch"
] | bernhard@arch.ethz.ch |
15f097e5d25a77c286c586a759cb6af2c2055888 | 743d1e35108932987cd6c451561dceb4a9bd93fa | /function/16_local_variable.py | 339d8c6c9b4a8acea8863752bda79b6846edf4ba | [] | no_license | jun-young000/grammar-basic | bede88f8f1a4a85c1af8254b2d6b3e53ae1bbc1b | f0f0ce6b22bed8460162a9993551ebd2132bcf38 | refs/heads/main | 2023-07-07T07:36:52.560004 | 2021-08-10T12:27:56 | 2021-08-10T12:27:56 | 382,528,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,291 | py | # 16_local_variable.py
# 예제 함수 정의
def show():
    """Demonstrate a local variable: ``a`` exists only while show() runs."""
    a = 1      # local variable, created inside the function
    a = a + 1
    print(a)   # prints 2; when the function returns, ``a`` is destroyed
def show1(b):
    """Demonstrate that a parameter is a local variable too."""
    b = b + 1  # rebinding only affects the local name ``b``
    print(b)   # prints the incremented value; ``b`` is gone after return
show()  # call the function; after it returns its local ``a`` no longer exists
# show(a)  # would fail: ``a`` is local to show() and unknown at module level
# NameError: name 'a' is not defined - because ``a`` only lived inside show()
show1(20)  # the argument is copied into show1's local parameter ``b``
# after show1 returns, all of its locals (including ``b``) are discarded
print(b)  # NOTE(review): raises NameError at run time — appears to be a deliberate scope demonstration; confirm
# local variable: created inside a function and destroyed when it returns;
# it cannot be used outside the function
"taropoint1@naver.com"
] | taropoint1@naver.com |
d04bb4d89096e1542544dacd46c832a9276e2f3e | 256767f888195384e8a91bff0864d0afc3f7e4e9 | /python_scripts/runDifferentNATAndST.py | 86d11040c552b5a41ca89a2d36f0cb531f48d98c | [] | no_license | HelenHarman/AIS_Object_Tracking | a71b9c2df78c02180b7bb1ac2561a03a570ef935 | 3ec7128a739387299ac3e4a648e8c8f30274af97 | refs/heads/master | 2021-01-15T08:28:06.415775 | 2016-01-23T13:01:20 | 2016-01-23T13:01:20 | 43,375,968 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,051 | py | """
Runs the AIS tracking using different Network Affiliation Thresholds (NATs) and Stimulation Thresholds (STs).
Compares the results to the locations given in the Ground Truth (GT) file
"""
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import argparse
import re
import numpy as np
import os
# Path to the tracker binary and its fixed command-line fragments.
application = "../build/AIS_Debug_build/AIS_TrackingAndDetection.app/Contents/MacOS/AIS_TrackingAndDetection"
confArg = " -conf ../DataSets/2PixelsLarger_tiger1.ais_config"#tiger1.ais_config"
# Every run writes its output into <resultsFile><NAT>_<ST>/
resultsFile = "../Results/NAT_ST/"
numberOfInitialARBs = "-NIA 5"  # NOTE(review): defined but never used below — confirm
outputBaseArg = " -output " + resultsFile # OT_ST should be added to the string
gtFileName = "../DataSets/Tiger1/groundtruth_rect.txt"  # ground-truth bounding boxes
# Parameter sweeps; only pairs with NAT > ST are actually run (see runTracking).
NATs = [0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]
stimulationThresholds =[0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]
###########################################
###### Runs AIS tracking ####
###########################################
def runTracking():
    """Launch the AIS tracker binary once for every (NAT, ST) pair with NAT > ST."""
    for nat in NATs:
        for st in stimulationThresholds:
            if nat <= st:
                continue  # only NAT > ST is a meaningful configuration
            outputArg = outputBaseArg + str(nat) + "_" + str(st)
            command = application + confArg + outputArg + " -ot 1 -nat " + str(nat) + " -st " + str(st)
            os.system(command)
###########################################
###########################################
###### Comparison to the GT ####
###########################################
##
# For each of the NAT and ST used compares the results to the GT
def processResults():
    """Evaluate every stored run against the ground truth and plot three surfaces."""
    framesMatchedGrid = []
    areaMatchedGrid = []
    centerDiffGrid = []
    for nat in NATs:
        framesRow = []
        areaRow = []
        centerRow = []
        for st in stimulationThresholds:
            if nat <= st:
                # pair was never run; pad with zeros to keep the grid rectangular
                framesRow.append(0)
                areaRow.append(0)
                centerRow.append(0)
                continue
            frames, areaMatch, centerDiff = processSingleResult(
                resultsFile + str(nat) + "_" + str(st) + "/locations.txt")
            framesRow.append(frames)
            areaRow.append(areaMatch)
            centerRow.append(centerDiff)
        framesMatchedGrid.append(framesRow)
        areaMatchedGrid.append(areaRow)
        centerDiffGrid.append(centerRow)
    plotGraph(framesMatchedGrid, "Number of frames matched")
    plotGraph(areaMatchedGrid, "Average area matched")
    plotGraph(centerDiffGrid, "Average center difference")
##
# Performs the comparison to the GT for one run of the AIS tracking
# Returns : Number of matched frames (higher better), Average area match (higher better), Average center difference (lower better)
def processSingleResult(fileName):
    """Compare one tracking run against the ground truth.

    Returns a 3-tuple:
      - number of frames whose IoU with the GT exceeds 0.5 (higher is better)
      - average IoU over the frames (higher is better)
      - average centre difference in pixels (lower is better)
    """
    locationsGt = readGTFile()
    locationsFound = readFoundLocationsFile(fileName, locationsGt)
    posDiff = 0
    positionDiffs = []
    # intersection-over-union: number of points belonging to A and B
    # divided by all distinct elements within the two sets
    area = 0.0
    numMatchedFrames = 0
    # NOTE(review): the final entry of locationsFound is skipped — confirm intended
    for i in range(0, len(locationsFound)-1):
        xFound = locationsFound[i][0]
        yFound = locationsFound[i][1]
        widthFound = locationsFound[i][2]
        heightFound = locationsFound[i][3]
        xGt = locationsGt[i][0]
        yGt = locationsGt[i][1]
        widthGt = locationsGt[i][2]
        heightGt = locationsGt[i][3]
        # Intersection area computed arithmetically; this replaces the former
        # O(width*height) pixel-counting double loop and yields the same count.
        overlapWidth = min(xFound + widthFound, xGt + widthGt) - max(xFound, xGt)
        overlapHeight = min(yFound + heightFound, yGt + heightGt) - max(yFound, yGt)
        interception = max(0, overlapWidth) * max(0, overlapHeight)
        union = (widthFound*heightFound) + (widthGt*heightGt) - interception
        iou = float(interception) / float(union)
        area = area + iou
        # does the frame found match the GT?
        if iou > 0.5:
            numMatchedFrames = numMatchedFrames + 1
        # calculate the center difference (L1 distance between box centres)
        pos = abs((locationsFound[i][0]+(locationsFound[i][2]/2)) - (locationsGt[i][0]+(locationsGt[i][2]/2)))
        pos = pos + abs((locationsFound[i][1]+(locationsFound[i][3]/2)) - (locationsGt[i][1]+(locationsGt[i][3]/2)))
        positionDiffs.append(pos)
        posDiff = posDiff + pos
    return (numMatchedFrames), (area/len(locationsFound)), (posDiff/len(locationsFound))
##
# Stores the GT locations into an array
def readGTFile(fileName=None):
    """Parse a ground-truth file into a list of [x, y, width, height] rows.

    :param fileName: optional path; defaults to the module-level ``gtFileName``
                     (parameter added for testability — existing callers are
                     unaffected).
    """
    if fileName is None:
        fileName = gtFileName
    locationsGt = []
    # 'with' guarantees the file handle is closed even on parse errors
    with open(fileName, 'r') as gtFile:
        for line in gtFile:
            # every run of digits on the line is one coordinate
            locationsGt.append([int(number) for number in re.findall("[0-9]+", line)])
    return locationsGt
##
# Stores the found locations into a 2D array
def readFoundLocationsFile(fileName, locationsGt):
    """Parse the tracker's locations file into a 2D list.

    Row 0 is seeded from the first ground-truth row; every subsequent row is
    the list of integers found on one line of the file.
    """
    locationsFound = [locationsGt[0]]
    with open(fileName, 'r') as foundFile:
        for line in foundFile:
            locationsFound.append([int(token) for token in re.findall("[0-9]+", line)])
    return locationsFound
def plotGraph(results, zlabel):
    """Render one 2D results grid (indexed [NAT][ST]) as a 3D surface plot."""
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    # X varies along the stimulation thresholds, Y along the NATs
    X, Y = np.meshgrid(stimulationThresholds, NATs)
    surf = ax.plot_surface(X, Y, results, rstride=1, cstride=1, cmap=cm.coolwarm,
                           linewidth=0, antialiased=False)
    ax.set_zlim(0, 300)
    fig.colorbar(surf, shrink=0.5, aspect=5)
    ax.set_xlabel('Stimulation Threshold')
    ax.set_ylabel('NAT')
    ax.set_zlabel(zlabel)
    plt.show()  # blocks until the window is closed
###########################################
############################################################
###### Plots the end number of ARBS for each ST and NAT ####
############################################################
##
# Show a graph of the results
def showEndNumberOfARBs():
    """Plot the final ARB count of every run as a 3D surface over (ST, NAT)."""
    results = getResults()
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    X, Y = np.meshgrid(stimulationThresholds, NATs)
    surf = ax.plot_surface(X, Y, results, rstride=1, cstride=1, cmap=cm.coolwarm,
                           linewidth=0, antialiased=False)
    ax.set_zlim(0, 250)
    fig.colorbar(surf, shrink=0.5, aspect=5)
    ax.set_xlabel('Stimulation Threshold')
    ax.set_ylabel('NAT')
    ax.set_zlabel('Number of ARBs')
    plt.show()  # blocks until the window is closed
##
#
def getResults():
    """Read the final ARB count of every stored run into a grid indexed [NAT][ST]."""
    results = []
    for NAT in NATs:
        otResults = []
        for stimulationThreshold in stimulationThresholds:
            if NAT <= stimulationThreshold:
                # pair was never run; pad with 0 to keep the grid rectangular
                otResults.append(0)
                continue
            # the last line of numberOfArbs.txt holds the final ARB count
            arbFile = open(resultsFile + str(NAT) + "_" + str(stimulationThreshold) + "/numberOfArbs.txt", 'r')
            otResults.append(int(arbFile.readlines()[-1]))
            arbFile.close()
        # fixed: was a Python-2-only print *statement*; the call form works on 2 and 3
        print("otResults : " + str(otResults))
        results.append(otResults)
    return results
###########################################
# main entry point: the other stages can be enabled by uncommenting them
#runTracking()
#showEndNumberOfARBs()
processResults()  # evaluate the stored runs against the ground truth and plot
########################################### | [
"heh14@aber.ac.uk"
] | heh14@aber.ac.uk |
e1deda18e1b2b41d478d2dce08424bda03ba7c3b | 9836a073c4f9c4f371fdfe971045835a0736c73c | /Genetic_Algorithm/venv/bin/easy_install | 699a4908203cf7b83e063ac1763559119bd94de6 | [] | no_license | Volodimirich/LargeSystems | 962efce5be55d077f32b9d4b959d0cdba305f693 | dfaa70312cd48cfcb6c37cb9f777b29d381f85e8 | refs/heads/master | 2023-02-05T04:42:32.461806 | 2020-12-17T23:11:39 | 2020-12-17T23:11:39 | 295,217,829 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | #!/home/voland/Workspace/LargeSystems/Genetic_Algorithm/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"volodimirich@arccn.ru"
] | volodimirich@arccn.ru | |
2219b78d7cfa9f5dd1ab782b5bffc725cc294b50 | b0ca1e84804fe0fc1283addc7c90a6d6749887f2 | /Grafos/main.py | b42aa040923c42f227b5550d6fb3305e5c2862b5 | [] | no_license | santedicolaa/PO | 922f028afb5c0f3689e407e5959ecc62304caf0b | 0f17fe5fc8d5661796e17d51e42994aa492e5f60 | refs/heads/master | 2020-04-21T18:25:25.159875 | 2019-06-12T19:42:22 | 2019-06-12T19:42:22 | 169,468,477 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,452 | py | import replit
import time
class Grafo():
    """Undirected weighted graph stored as a dict-of-dicts adjacency map."""

    def __init__(self):
        self.inicio = {}        # adjacency map: vertex -> {neighbour: weight}
        self.num_vertices = 0   # number of vertices currently in the graph
        self.lis_vertices = []  # insertion-ordered list of vertex names

    def addVertice(self, vertice):
        """Add a vertex; warns and returns 0 when it already exists."""
        inicio = self.inicio
        if vertice in inicio:
            print(vertice, "já pertence ao grafo")
            time.sleep(1)
            return 0
        # simplified: the original re-tested ``vertice not in inicio`` here,
        # which is always true after the early return above
        inicio[vertice] = {}
        self.num_vertices = self.num_vertices + 1
        self.lis_vertices.append(vertice)

    def addCaminho(self, v1, v2, peso):
        """Add an undirected edge v1<->v2 with weight ``peso``.

        Returns 1 on success, 0 when either endpoint is missing.
        """
        inicio = self.inicio
        if v1 not in inicio:
            return 0
        if v2 not in inicio:
            return 0
        inicio[v1][v2] = peso
        inicio[v2][v1] = peso
        return 1

    def procurar(self, vertice):
        """Print whether ``vertice`` exists and, if so, its adjacency list."""
        inicio = self.inicio
        if vertice in inicio:
            print("\nO vértice", vertice, "pertence ao grafo.\n")
            print("Adjacências e pesos:\n")
            # fixed: this loop read the module-global ``grafo`` instead of
            # ``self``, which broke the method for any other Grafo instance
            for j in inicio[vertice]:
                print(" ", j, ", peso:", inicio[vertice][j])
            print("\n")
        else:
            print("\n", vertice, "não pertence ao grafo.\n")
# Module-level Grafo instance used by menu(); rebound again at the bottom of the file.
grafo = Grafo()
def menu():
    """Interactive console menu; recurses into itself after every action.

    Operates on the shared module-level ``grafo`` instance. Relies on the
    replit console helper for screen clearing.
    """
    replit.clear()
    print("*** Grafos ***\n")
    print("1 - Adicionar vértice")
    print("2 - Adicionar caminho")
    print("3 - Mostrar grafo")
    print("4 - Procurar Vértice\n")
    print("Escolha uma opção: ", end='')
    escolha = int(input())
    if escolha == 1:
        # add a single vertex to the graph
        replit.clear()
        print("Nome do vértice: ", end='')
        nome = (input())
        grafo.addVertice(nome)
        print("\n", nome, "adicionado com sucesso!")
        time.sleep(1)
        menu()
    if escolha == 2:
        # add a weighted edge between two existing vertices
        replit.clear()
        print("\nVértice de origem: ", end='')
        v1 = (input())
        print("\nVértice de destino: ", end='')
        v2 = (input())
        print("\nPeso do Caminho: ", end='')
        peso = (input())
        if (grafo.addCaminho(v1, v2, peso)):
            print("\nCaminho adicionado com sucesso!")
            time.sleep(1)
            menu()
        else:
            print("\nVértice inexistente.")
            time.sleep(1)
            menu()
    if escolha == 3:
        # dump every vertex with its adjacency list and weights
        replit.clear()
        for i in grafo.lis_vertices:
            print("-", i, ":")
            for j in grafo.inicio[i]:
                print(" ", j, ", peso:", grafo.inicio[i][j])
            print("\n")
        print("Digite algo para voltar ao menun principal: ", end='')
        if (input()):
            menu()
    if escolha == 4:
        # look up a single vertex via Grafo.procurar
        replit.clear()
        print("\nVértice a ser procurado: ", end='')
        grafo.procurar(input())
        print("Digite algo para voltar ao menun principal: ", end='')
        if (input()):
            menu()
grafo = Grafo()  # fresh graph instance (replaces the one created above)
menu()  # start the interactive loop
| [
"noreply@github.com"
] | noreply@github.com |
9cd4c96a648545918764e06df4c6c48ad22746ee | 43312a239823848b3f035a6ed6a52f23923004a9 | /EasyTeleBot/GenericFunctions.py | fb6f60ba3f41d59b224c99b6de334cb6c5e79aab | [
"MIT"
] | permissive | idozahavy/EasyTeleBot | 69a3072b8dbdc2a796b9763de94112015cdbb16a | ac5d2ff64d1a9a8382081ee9353596f5dd771e62 | refs/heads/master | 2022-11-21T00:58:52.468160 | 2020-06-28T15:48:54 | 2020-06-28T15:48:54 | 267,246,333 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,647 | py | import copy
class Data(object):
    """Thin attribute bag with dict-style helpers."""

    def has_attribute(self, attr):
        """Return True when this object carries attribute ``attr``."""
        return hasattr(self, attr)

    def __str__(self):
        """Render as the string form of the underlying attribute dict."""
        return str(self.__dict__)

    def set_dictionary(self, dictionary: dict):
        """Replace the whole attribute dictionary at once."""
        self.__dict__ = dictionary

    def set_attribute(self, name, value):
        setattr(self, name, value)

    def get_attribute(self, name):
        return getattr(self, name)
def DecodeUTF8(text: str):
    """Round-trip ``text`` through UTF-8 (a no-op for valid str input)."""
    encoded = text.encode('utf-8')
    return encoded.decode()
def RemoveTemplateFormatName(text, format_name) -> str:
    """Delete the first ``${format_name}`` placeholder from ``text``."""
    start = text.find('${' + format_name + '}')
    end = text.find('}', start)
    return text[:start] + text[end + 1:]
def GetTemplateFormatNames(text) -> list:
    """Collect every ``${name}`` placeholder name in order of appearance."""
    names = []
    while True:
        start = text.find('${')
        end = text.find('}', start)
        if start == -1 or end == -1:
            return names
        names.append(text[start + 2:end])
        # strip the matched placeholder and keep scanning the remainder
        text = text[:start] + text[end + 1:]
def RemoveUnreachableTemplateFormats(text_format: str, dat: Data):
    """Strip every placeholder whose name is not an attribute of ``dat``."""
    result = text_format
    for name in GetTemplateFormatNames(text_format):
        if not dat.has_attribute(name):
            result = RemoveTemplateFormatName(result, name)
    return result
def JoinDictionariesLists(starting_dict, dominant_dict):
    """Merge ``dominant_dict`` into a deep copy of ``starting_dict``.

    For keys present in both: lists are concatenated, strings take the
    dominant value, dicts are merged recursively by this function, and
    mismatched value types keep the dominant value. Keys found only in
    ``dominant_dict`` are added as-is.

    :param starting_dict: the starting dict
    :param dominant_dict: wins whenever the value types differ
    :return: a combined dictionary of the two
    """
    merged = copy.deepcopy(starting_dict)
    for key, new_value in dominant_dict.items():
        if key not in merged:
            merged[key] = new_value
            continue
        old_value = merged[key]
        if type(old_value) != type(new_value):
            merged[key] = new_value
        elif type(old_value) is list:
            merged[key] = old_value + new_value
        elif type(old_value) is str:
            merged[key] = new_value
        elif type(old_value) is dict:
            merged[key] = JoinDictionariesLists(old_value, new_value)
        # NOTE: equal non-list/str/dict types keep the starting value,
        # mirroring the original behaviour
    return merged
| [
"idozahavy@users.noreply.github.com"
] | idozahavy@users.noreply.github.com |
94c384e986f1ff3e417c487c88c6ae0ea86c0df1 | 6651618987136b8c785dd57b8a8152e168114884 | /sci_hrms/models/hr_contract.py | a3e26ae0f5c7f0665ff27d04e9c638e29c067269 | [] | no_license | suningwz/odoo12_addons_qlptn | bece654efa23ddfb4a1f4cfffda7f11f279a436c | 80785cdbbb742d0219e5b98dffc99c347da9b774 | refs/heads/master | 2021-05-17T22:06:55.679158 | 2020-03-03T03:46:05 | 2020-03-03T03:46:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,883 | py | from odoo import fields, api, models, _
import datetime
from dateutil.relativedelta import relativedelta
from odoo.http import request
from odoo.exceptions import ValidationError
import pytz
def canh_bao_tang_luong(i):
    """E-mail the manager/recruiter that contract ``i`` is near its salary review.

    Side effect: pushes ``salary_year`` forward by one year so the warning
    fires only once per review cycle.
    """
    exp_date = i.salary_year + relativedelta(years=1)
    lst_email_follow = []
    if i.employee_id.parent_id:
        lst_email_follow.append(i.employee_id.parent_id.work_email)
    if i.job_id.user_id:
        lst_email_follow.append(i.job_id.user_id.email)
    if len(lst_email_follow) > 0:
        # NOTE(review): assumes parent_id is set whenever any follower e-mail
        # exists (its name opens the message) — confirm.
        mail_content = "Kính gửi: <b>" + i.employee_id.parent_id.name + "</b><br/> Nhân sự bạn phụ trách quản lý:" + \
                       "<br/> -Họ vào tên: " + i.employee_id.name
        # fixed: guard optional fields (False when unset would raise TypeError
        # on concatenation), consistent with canh_bao_het_hop_dong below
        if i.employee_id.work_phone:
            mail_content += "<br/> - Điện thoại: " + i.employee_id.work_phone
        if i.employee_id.work_email:
            mail_content += "<br/> - Email: " + i.employee_id.work_email
        if i.name:
            mail_content += "<br/> - Kiểu hợp đồng: " + str(i.name)
        if i.date_start:
            mail_content += "<br/> - Ngày bắt đầu: " + str(i.date_start.strftime('%d/%m/%Y'))
        if i.date_end:
            mail_content += "<br/> - Ngày kết thúc: " + str(i.date_end.strftime('%d/%m/%Y'))
        mail_content += "<br/> Đã sắp đến kỳ tăng lương vui lòng làm bảng đánh giá nhận tổng hợp những đóng góp trong quá trình làm việc." + \
                        "<br/>Trân trọng cám ơn!"
        main_content = {
            'subject': ('THÔNG BÁO NHÂN SỰ CHUẨN BỊ ĐẾN KỲ TĂNG LƯƠNG.'),
            'author_id': i.env.user.partner_id.id,
            'body_html': mail_content,
            'email_to': ",".join(lst_email_follow),
        }
        i.env['mail.mail'].create(main_content).send()
    i.write({
        'salary_year': exp_date
    })
def canh_bao_het_hop_dong(i):
    """Send contract-expiry reminder e-mails for contract ``i``.

    Three independent mails: to the employee, to the direct manager
    (``employee_id.parent_id``) and to the recruiter in charge of the job
    position (``job_id.user_id``). Each mail is only sent when the recipient
    exists.
    """
    if i.employee_id.work_email:
        mail_employee = "Kính gửi: <b>" + i.employee_id.name + "</b>" + " <br/> Hợp đồng lao động của bạn đã sắp hết. Vui lòng liên hệ với quản lý trực tiếp của bạn để lấy mẫu đánh giá " + \
                        "quá trình làm việc.<br/>Trân trọng cám ơn!"
        main_employee = {
            'subject': ('SCI THÔNG BÁO - NHÂN SỰ HẾT HỢP ĐỒNG LAO ĐỘNG.'),
            'body_html': mail_employee,
            'email_to': ",".join([i.employee_id.work_email]),
        }
        i.env['mail.mail'].create(main_employee).send()
    if i.employee_id.parent_id:
        mail_parent = "Kính gửi: <b>" + i.employee_id.parent_id.name + "</b><br/> Nhân sự bạn quản lý:" + \
                      "<br/> - Họ vào tên: " + i.employee_id.name
        if i.employee_id.work_phone:
            mail_parent += "<br/> - Điện thoại: " + i.employee_id.work_phone
        if i.employee_id.work_email:
            mail_parent += "<br/> - Email: " + i.employee_id.work_email
        if i.name:
            mail_parent += "<br/> - Tên hợp đồng: " + str(i.name)
        if i.type_id:  # fixed: was a duplicated ``if i.name`` guard
            mail_parent += "<br/> - Loại hợp đồng: " + str(i.type_id.name)
        if i.date_start:
            mail_parent += "<br/> - Ngày bắt đầu: " + str(i.date_start.strftime('%d/%m/%Y'))
        if i.date_end:
            mail_parent += "<br/> - Ngày kết thúc: " + str(i.date_end.strftime('%d/%m/%Y'))
        mail_parent += "<br/> Đã sắp hết hạn hợp đồng lao động." \
                       "Vui lòng liên hệ với phòng Hành Chính Nhân Sự để lấy mẫu đánh giá quá trình làm việc. <br/>Trân trọng cám ơn!"
        main_parent = {
            'subject': ('SCI THÔNG BÁO - NHÂN SỰ BẠN QUẢN LÝ HẾT HỢP ĐỒNG LAO ĐỘNG.'),
            'body_html': mail_parent,
            'email_to': ",".join([i.employee_id.parent_id.work_email]),
        }
        i.env['mail.mail'].create(main_parent).send()
    if i.job_id.user_id:
        mail_hr = "Kính gửi: <b>" + i.job_id.user_id.name + "</b><br/> Nhân sự bạn phụ trách tuyển dụng:" + \
                  "<br/> - Họ vào tên: " + i.employee_id.name
        if i.employee_id.work_phone:
            mail_hr += "<br/> - Điện thoại: " + i.employee_id.work_phone
        if i.employee_id.work_email:
            mail_hr += "<br/> - Email: " + i.employee_id.work_email
        # fixed: the contract name/type lines appended to ``mail_parent``
        # (possibly undefined in this branch) instead of ``mail_hr``, and the
        # second guard duplicated ``if i.name``
        if i.name:
            mail_hr += "<br/> - Tên hợp đồng: " + str(i.name)
        if i.type_id:
            mail_hr += "<br/> - Loại hợp đồng: " + str(i.type_id.name)
        # fixed: date format was '%d%m/%Y' (missing day/month separator)
        if i.date_start:
            mail_hr += "<br/> - Ngày bắt đầu: " + str(i.date_start.strftime('%d/%m/%Y'))
        if i.date_end:
            mail_hr += "<br/> - Ngày kết thúc: " + str(i.date_end.strftime('%d/%m/%Y'))
        mail_hr += "<br/> - Phòng ban: " + i.employee_id.department_id.name + \
                   "<br/> Đã sắp hết hạn hợp đồng lao động." \
                   "Vui lòng làm mẫu đánh giá quá trình làm việc gửi tới nhân sự. <br/>Trân trọng cám ơn!"
        main_hr = {
            'subject': ('SCI THÔNG BÁO - NHÂN SỰ BẠN PHỤ TRÁCH TUYỂN DỤNG HẾT HỢP ĐỒNG LAO ĐỘNG.'),
            'body_html': mail_hr,
            'email_to': ",".join([i.job_id.user_id.email]),
        }
        i.env['mail.mail'].create(main_hr).send()
# Selection options for the salary-review period unit: (value, Vietnamese label).
DATETYPE = [('days', 'Ngày'), ('months', 'tháng'), ('years', 'năm')]
class HR_Contract(models.Model):
    """Extends ``hr.contract`` with salary-review tracking, salary components
    and the reminder cron hooked to the two module-level helpers above."""
    _inherit = 'hr.contract'

    # last approved salary-review date (read-only in the UI)
    last_salary = fields.Date('Kỳ xét duyệt lần cuối', readonly="1")
    # unit of the review period (see DATETYPE)
    salary_deadline_type = fields.Selection(DATETYPE, 'Date Type', default="years", required=True,
                                            track_visibility="onchange")
    # length of the review period, in salary_deadline_type units
    salary_deadline = fields.Integer('Thời hạn xét duyệt', size=3, track_visibility="onchange", default=1)
    # next salary-review date, set by _compute_status
    salary_year = fields.Date(string="Ngày xét lương", compute="_compute_status")
    salary_status = fields.Text('Tình trạng', compute="_compute_status")
    type_id = fields.Many2one('hr.contract.type', string="Loại hợp đồng", required=True,
                              default=lambda self: self.env['hr.contract.type'].search([], limit=1))
    # added by Thanh
    basic_salary = fields.Monetary('Basic salary', digits=(16, 2), track_visibility="onchange")
    allowance = fields.Monetary('Allowance')
    KPI_salary = fields.Monetary('KPI salary')
    name = fields.Char(compute='_get_contract_name', store=True, default='New contract')
    decision_number = fields.Char('Decision number', readonly=True)

    @api.depends('salary_deadline', 'salary_deadline_type', 'date_start')
    def _compute_deadline_display(self):
        """Render the review period as text, e.g. '1 years'.

        NOTE(review): this writes a *string* into ``salary_year``, which is a
        Date field — confirm whether this compute is still wired anywhere.
        """
        for record in self:
            if record.salary_deadline > 0 and record.salary_deadline_type:
                time_type = ''
                if record.salary_deadline_type == 'days':
                    time_type = _('days')
                elif record.salary_deadline_type == 'months':
                    time_type = _('months')
                elif record.salary_deadline_type == 'years':
                    time_type = _('years')
                record.salary_year = str(record.salary_deadline) + ' ' + time_type
            else:
                record.salary_year = _('Undefined')

    @api.depends('salary_deadline', 'salary_deadline_type', 'date_start', 'last_salary')
    def _compute_status(self):
        """Compute the next review date (``salary_year``) and a human-readable
        countdown message (``salary_status``).

        NOTE(review): when ``date_start`` is unset neither computed field is
        assigned — Odoo expects compute methods to assign every record; confirm.
        """
        for record in self:
            maintenance_msg = ''
            if record.date_start:
                date_start = record.date_start
                if record.salary_deadline > 0 and record.salary_deadline_type:
                    if record.last_salary and record.last_salary > record.date_start:
                        # NOTE(review): strptime expects a string, but Odoo 12
                        # Date fields read back as ``datetime.date`` — confirm.
                        date = datetime.datetime.strptime(record.last_salary, '%Y-%m-%d').date()
                    else:
                        date = date_start
                    deadine_salary = self.count_deadline(date, record.salary_deadline_type,
                                                         record.salary_deadline)
                    days = deadine_salary['days']
                    record.salary_year = deadine_salary['date']
                    if days < 0:
                        maintenance_msg += ('Quá hạn xét tăng lương {0} ngày').format(str(abs(days)))
                    elif days == 0:
                        maintenance_msg += ('Hôm nay là ngày xét tăng lương')
                    elif days < 15:
                        maintenance_msg += ('{0} ngày nữa là ngày xét tăng lương').format(str(abs(days)))
                    record.salary_status = maintenance_msg

    def count_deadline(self, date, date_type, index):
        """Return ``{'date': deadline, 'days': days-from-today}``.

        'today' is evaluated in the user's timezone (context 'tz', UTC default).
        NOTE(review): ``datetime.now()`` is naive local server time but is
        localized as UTC — confirm the server clock runs in UTC.
        """
        time = datetime.datetime.now()
        tz_current = pytz.timezone(self._context.get('tz') or 'UTC') # get timezone user
        tz_database = pytz.timezone('UTC')
        time = tz_database.localize(time)
        time = time.astimezone(tz_current)
        time = time.date()
        if date_type == 'days':
            date += relativedelta(days=+index)
        elif date_type == 'months':
            date += relativedelta(months=+index)
        elif date_type == 'years':
            date += relativedelta(years=+index)
        days = (date - time).days
        return {'date': date, 'days': days}

    @api.multi
    def print_contract(self):
        """Open the template wizard preselected with the contract template
        matching this contract type and (when set) the root department."""
        contract_type_xml_id = self.type_id.get_external_id()[self.type_id.id]
        templates = self.env['temp.creation'].search([('reference', 'ilike', contract_type_xml_id.split('_')[-1])])
        if self.department_id:
            grant_parent_dept = self.department_id.root_parent
            grant_parent_dept_xml_id = grant_parent_dept.get_external_id()[grant_parent_dept.id]
            template = templates.filtered(lambda t: t.reference.split('-')[0] == grant_parent_dept_xml_id)
            if not template:
                # fall back to the company-wide ('sci') template
                template = templates.filtered(lambda t: t.reference.split('-')[0] == 'sci')
        else:
            template = templates.filtered(lambda t: t.reference.split('-')[0] == 'sci')
        if not template:
            raise ValidationError(_('Contract template not available, please contact your admin.'))
        return {'name': (_('Contract')),
                'type': 'ir.actions.act_window',
                'res_model': 'temp.wizard',
                'view_mode': 'form',
                'view_type': 'form',
                'target': 'inline',
                'view_id': self.env.ref('ms_templates.report_wizard').id,
                'context': {'default_template_id': template.id}}

    @api.depends('employee_id', 'type_id')
    def _get_contract_name(self):
        """Stored compute: '<employee code>-<employee name>-<contract type>'.

        NOTE(review): ``employee_id.employee_id`` looks like an employee-code
        field added by this module — confirm.
        """
        for record in self:
            str_name = ''
            if record.employee_id.employee_id:
                str_name = record.employee_id.employee_id
            if record.employee_id.name:
                str_name += '-' + record.employee_id.name
            if record.type_id:
                str_name += '-' + record.type_id.name
            record.name = str_name

    @api.model
    def create(self, vals):
        """Assign the next sequential decision number to every new contract."""
        res = super(HR_Contract, self).create(vals)
        res.decision_number = self.env['ir.sequence'].next_by_code('hr.decision.number')
        return res

    @api.model
    def update_salary_deadline(self):
        """Cron hook: send reminders exactly 10 days before a salary review
        or a contract end date (see the two module-level helpers)."""
        data = self.search([('state', 'in', ('open', 'pending'))])
        dt = datetime.datetime.now().date()
        for record in data:
            if record.date_end and record.date_end >= record.salary_year:
                s = record.salary_year + relativedelta(days=-10)
                if dt == s:
                    canh_bao_tang_luong(record)
            if record.date_end:
                ed = record.date_end + relativedelta(days=-10)
                if dt == ed:
                    canh_bao_het_hop_dong(record)
        return True
| [
"lex4vn@gmail.com"
] | lex4vn@gmail.com |
962da2abca34985938d9ede37484fcea375e39e4 | c730d4df20898a966b8ff215b2d3cce894bcf55e | /Linked_Lists/concatenate_circularLinkedList.py | 9cf6ab5a0b417f94f29737d725ed6562f0d0d219 | [] | no_license | venukumarbv/Datastructure_Algorithms_using_Python | 23a6996b171aafc0bcfc43f55e679ee6ef76c5d7 | cd32691edbf9f7b6cdfc16ea742f78fbc5f003e4 | refs/heads/master | 2022-11-19T22:27:38.751963 | 2020-07-21T10:46:55 | 2020-07-21T10:46:55 | 281,368,835 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,608 | py | class Node:
def __init__(self, value):
    """Create a detached node holding ``value``; ``link`` is set by the list."""
    self.info = value  # payload
    self.link = None   # next node in the circular chain
class CircularLinkedList:
    """Singly linked circular list tracked by a pointer to its last node."""

    def __init__(self):
        self.last = None  # last node; ``last.link`` is the head

    def insert_a_node(self, value):
        """Append ``value`` at the end of the list."""
        temp = Node(value)
        if self.last is None:  # Create a logical cicular list during empty list
            self.last = temp
            self.last.link = self.last
        # insert at end
        temp.link = self.last.link
        self.last.link = temp
        self.last = temp

    def create_list(self):
        """Build the list interactively from console input."""
        n = int(input("Enter number of Nodes: "))
        for i in range(n):
            value = int(input("Enter the vale of {} node ".format(i+1)))
            self.insert_a_node(value)

    def display(self):
        """Print every element once, starting from the head."""
        if self.last is None:
            print("The List is Empty")
            return
        p = self.last.link
        while True:
            print('-->', p.info, end='')
            p = p.link
            if p == self.last.link:
                break
        print()

    def concatenate(self, list2):
        """Append all nodes of ``list2`` after this list (stays circular)."""
        if list2.last is None:
            # fixed: checking list2 first avoids an AttributeError on
            # ``list2.last.link`` when both lists are empty
            return
        if self.last is None:
            # fixed: was ``list2.last.link`` (the head node), which left
            # ``last`` pointing at the first node and rotated later output
            self.last = list2.last
            return
        p = self.last.link
        self.last.link = list2.last.link
        list2.last.link = p
        self.last = list2.last
# Interactive demo: build two lists from console input, show them, then
# concatenate list 2 onto list 1 and show the combined result.
clist1 = CircularLinkedList()
clist2 = CircularLinkedList()
print("List 1")
clist1.create_list()
print("List 2")
clist2.create_list()
print("The List 1 is:")
clist1.display()
print("The List 2 is:")
clist2.display()
print("Concatenated List is :")
clist1.concatenate(clist2)
clist1.display()
"VKvision@venu.com"
] | VKvision@venu.com |
e0b3b2bf29c0e1156a00e4caded2f504e78bfba4 | 004f96dfdb295e6ba3da755c9c65ba87e8329ed4 | /hotel/models/models.py | 1e7b8ed19327a412bec3e9080f6229937e02052f | [] | no_license | MohamedSalahKamel123/Hotel | 9f67824b5b7ae6e3be12602d7baada8b448754af | 8d01ee3c12f1020d4c43295efc537cdeb1e76b17 | refs/heads/master | 2020-03-22T00:22:37.678945 | 2018-06-30T10:49:23 | 2018-06-30T10:49:23 | 139,240,827 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | # -*- coding: utf-8 -*-
from odoo import models, fields, api
# class hotel(models.Model):
# _name = 'hotel.hotel'
# name = fields.Char()
# value = fields.Integer()
# value2 = fields.Float(compute="_value_pc", store=True)
# description = fields.Text()
#
# @api.depends('value')
# def _value_pc(self):
# self.value2 = float(self.value) / 100 | [
"mohamedsalahkamel123@gmail.com"
] | mohamedsalahkamel123@gmail.com |
6d268fb1bb10e27331a3f7427f4e7ec31917a891 | 5e557741c8867bca4c4bcf2d5e67409211d059a3 | /test/distributed/elastic/timer/local_timer_example.py | 8d3702c9a70283500c437adc763c2e6090b382a9 | [
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] | permissive | Pandinosaurus/pytorch | a2bb724cfc548f0f2278b5af2fd8b1d2758adb76 | bb8978f605e203fbb780f03010fefbece35ac51c | refs/heads/master | 2023-05-02T20:07:23.577610 | 2021-11-05T14:01:30 | 2021-11-05T14:04:40 | 119,666,381 | 2 | 0 | NOASSERTION | 2021-11-05T19:55:56 | 2018-01-31T09:37:34 | C++ | UTF-8 | Python | false | false | 4,080 | py | #!/usr/bin/env python3
# Owner(s): ["oncall: r2p"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
import multiprocessing as mp
import signal
import time
import unittest
import torch.distributed.elastic.timer as timer
import torch.multiprocessing as torch_mp
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
run_tests,
IS_WINDOWS,
IS_MACOS,
sandcastle_skip_if,
)
# Root-logger setup shared by the example worker processes.
logging.basicConfig(
    level=logging.INFO, format="[%(levelname)s] %(asctime)s %(module)s: %(message)s"
)
def _happy_function(rank, mp_queue):
    """Worker that finishes (0.5s sleep) well inside its 1s expiry window."""
    timer.configure(timer.LocalTimerClient(mp_queue))
    with timer.expires(after=1):
        time.sleep(0.5)
def _stuck_function(rank, mp_queue):
    """Worker that oversleeps its 1s expiry window; the timer server reaps it."""
    timer.configure(timer.LocalTimerClient(mp_queue))
    with timer.expires(after=1):
        time.sleep(5)
# timer is not supported on macos or windowns
if not (IS_WINDOWS or IS_MACOS):

    class LocalTimerExample(unittest.TestCase):
        """
        Demonstrates how to use LocalTimerServer and LocalTimerClient
        to enforce expiration of code-blocks.

        Since torch multiprocessing's ``start_process`` method currently
        does not take the multiprocessing context as parameter argument
        there is no way to create the mp.Queue in the correct
        context BEFORE spawning child processes. Once the ``start_process``
        API is changed in torch, then re-enable ``test_torch_mp_example``
        unittest. As of now this will SIGSEGV.
        """

        @sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test is asan incompatible")
        def test_torch_mp_example(self):
            # in practice set the max_interval to a larger value (e.g. 60 seconds)
            mp_queue = mp.get_context("spawn").Queue()
            server = timer.LocalTimerServer(mp_queue, max_interval=0.01)
            server.start()

            world_size = 8

            # all processes should complete successfully
            # since start_process does NOT take context as parameter argument yet
            # this method WILL FAIL (hence the test is disabled)
            torch_mp.spawn(
                fn=_happy_function, args=(mp_queue,), nprocs=world_size, join=True
            )

            with self.assertRaises(Exception):
                # torch.multiprocessing.spawn kills all sub-procs
                # if one of them gets killed
                torch_mp.spawn(
                    fn=_stuck_function, args=(mp_queue,), nprocs=world_size, join=True
                )

            server.stop()

        @sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test is asan incompatible")
        def test_example_start_method_spawn(self):
            self._run_example_with(start_method="spawn")

        # @sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test is asan incompatible")
        # def test_example_start_method_forkserver(self):
        #     self._run_example_with(start_method="forkserver")

        def _run_example_with(self, start_method):
            # Even-indexed workers deliberately exceed the 1s expiry and are
            # expected to die with SIGKILL; odd-indexed workers finish cleanly.
            spawn_ctx = mp.get_context(start_method)
            mp_queue = spawn_ctx.Queue()
            server = timer.LocalTimerServer(mp_queue, max_interval=0.01)
            server.start()

            world_size = 8
            processes = []
            for i in range(0, world_size):
                if i % 2 == 0:
                    p = spawn_ctx.Process(target=_stuck_function, args=(i, mp_queue))
                else:
                    p = spawn_ctx.Process(target=_happy_function, args=(i, mp_queue))
                p.start()
                processes.append(p)

            for i in range(0, world_size):
                p = processes[i]
                p.join()
                if i % 2 == 0:
                    self.assertEqual(-signal.SIGKILL, p.exitcode)
                else:
                    self.assertEqual(0, p.exitcode)

            server.stop()
if __name__ == "__main__":
run_tests()
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
797d913b8e8db2d41ae09acf03025094110e6036 | b56751f13eda1438fed17de5f8fae02513daa318 | /assignment3/GANS.py | f80364e92d431368b6a5344633a6d20a80d9fdb2 | [] | no_license | WinterPan2017/cs231n | e48ca0ea3e3c069992462a492bdf6257da04f7ed | bd366b6e06bd9eca10aa09a32b00c41eece5e67b | refs/heads/main | 2023-02-03T10:02:18.662532 | 2020-12-23T11:18:07 | 2020-12-23T11:18:07 | 322,791,824 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,119 | py | '''
Description: Copyright © 1999 - 2020 Winter. All Rights Reserved.
Finished Generative_Adversarial_Networks_PyTorch.ipynb here.
Author: Winter
Email: 837950571@qq.com
Date: 2020-12-18 14:02:09
LastEditTime: 2020-12-20 16:46:55
'''
import torch
import torch.nn as nn
from torch.nn import init
import torchvision
import torchvision.transforms as T
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.utils.data import sampler
import torchvision.datasets as dset
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
dtype = torch.FloatTensor
def show_images(images):
    """Plot a batch of flattened images on a square matplotlib grid.

    Each row of *images* is reshaped to a square picture and drawn in its
    own axis-less subplot cell.
    """
    flat = np.reshape(images, [images.shape[0], -1])  # (batch_size, D)
    grid_dim = int(np.ceil(np.sqrt(flat.shape[0])))   # cells per side
    img_dim = int(np.ceil(np.sqrt(flat.shape[1])))    # pixels per side

    fig = plt.figure(figsize=(grid_dim, grid_dim))
    grid = gridspec.GridSpec(grid_dim, grid_dim)
    grid.update(wspace=0.05, hspace=0.05)

    for cell_idx, pixels in enumerate(flat):
        axis = plt.subplot(grid[cell_idx])
        plt.axis('off')
        axis.set_xticklabels([])
        axis.set_yticklabels([])
        axis.set_aspect('equal')
        plt.imshow(pixels.reshape([img_dim, img_dim]))
    plt.show()
    return
def preprocess_img(x):
    """Rescale pixel values from [0, 1] to the GAN's [-1, 1] range."""
    return x * 2.0 - 1.0
def deprocess_img(x):
    """Rescale pixel values from [-1, 1] back to the displayable [0, 1]."""
    return (x + 1.0) * 0.5
def rel_error(x, y):
    """Maximum elementwise relative error between x and y.

    The denominator is floored at 1e-8 so entries near zero do not blow up.
    """
    denom = np.maximum(1e-8, np.abs(x) + np.abs(y))
    return np.max(np.abs(x - y) / denom)
def count_params(model):
    """Return the total number of parameter elements in *model*."""
    total = 0
    for param in model.parameters():
        total += np.prod(param.size())
    return total
class ChunkSampler(sampler.Sampler):
    """Yield dataset indices sequentially, starting at a fixed offset.

    num_samples: how many indices to produce.
    start: first index to yield (defaults to 0).
    """
    def __init__(self, num_samples, start=0):
        self.num_samples = num_samples
        self.start = start

    def __iter__(self):
        stop = self.start + self.num_samples
        return iter(range(self.start, stop))

    def __len__(self):
        return self.num_samples
def sample_noise(batch_size, dim):
    """Return a (batch_size, dim) tensor of noise uniform in (-1, 1).

    Input:
    - batch_size: number of noise vectors to draw.
    - dim: dimensionality of each noise vector.
    Output:
    - PyTorch Tensor of shape (batch_size, dim) with entries in (-1, 1).
    """
    uniform01 = torch.rand((batch_size, dim))
    return uniform01 * 2 - 1
class Flatten(nn.Module):
    """Collapse a 4-d image batch (N, C, H, W) into a matrix (N, C*H*W)."""
    def forward(self, x):
        # unpack to assert a 4-d input, then merge the trailing dims
        N, C, H, W = x.size()
        return x.view(N, -1)
class Unflatten(nn.Module):
    """Reshape a flat batch (N, C*H*W) back into images of shape (N, C, H, W)."""
    def __init__(self, N=-1, C=128, H=7, W=7):
        super().__init__()
        # target shape; N = -1 lets the batch dimension be inferred
        self.N = N
        self.C = C
        self.H = H
        self.W = W

    def forward(self, x):
        return x.view(self.N, self.C, self.H, self.W)
def initialize_weights(m):
    """Xavier-initialize the weights of Linear/ConvTranspose2d modules.

    Intended for use with ``model.apply(initialize_weights)``; other module
    types are left untouched.
    """
    if isinstance(m, (nn.Linear, nn.ConvTranspose2d)):
        init.xavier_uniform_(m.weight.data)
def discriminator():
    """Fully connected GAN discriminator: 784 -> 256 -> 256 -> 1 logit."""
    layers = [
        Flatten(),
        nn.Linear(784, 256),
        nn.LeakyReLU(0.01),
        nn.Linear(256, 256),
        nn.LeakyReLU(0.01),
        nn.Linear(256, 1),
    ]
    return nn.Sequential(*layers)
def generator(noise_dim=96):
    """Fully connected GAN generator: noise -> 1024 -> 1024 -> 784 (tanh)."""
    layers = [
        nn.Linear(noise_dim, 1024),
        nn.ReLU(),
        nn.Linear(1024, 1024),
        nn.ReLU(),
        nn.Linear(1024, 784),
        nn.Tanh(),
    ]
    return nn.Sequential(*layers)
def bce_loss(scores, target):
    """Numerically stable binary cross-entropy on raw logits.

    Uses the identity max(s, 0) - s*t + log(1 + exp(-|s|)) (see the
    TensorFlow ``sigmoid_cross_entropy_with_logits`` derivation), which
    avoids overflow for large |s|.

    Inputs:
    - scores: Tensor of shape (N,) of logits.
    - target: Tensor of shape (N,) of 0/1 labels.
    Returns:
    - mean BCE loss over the minibatch.
    """
    per_example = (
        scores.clamp(min=0)
        - scores * target
        + (1 + (-scores.abs()).exp()).log()
    )
    return per_example.mean()
def discriminator_loss(logits_real, logits_fake):
    """Vanilla GAN discriminator loss.

    Real samples are labeled 1, fakes are labeled 0; the two BCE terms are
    summed.

    Inputs:
    - logits_real: Tensor of shape (N,) of scores for real data.
    - logits_fake: Tensor of shape (N,) of scores for fake data.
    Returns:
    - scalar Tensor with the discriminator loss.
    """
    real_targets = torch.ones_like(logits_real)
    fake_targets = torch.zeros_like(logits_fake)
    return bce_loss(logits_real, real_targets) + bce_loss(logits_fake, fake_targets)
def generator_loss(logits_fake):
    """Vanilla GAN generator loss: make D label the fakes as real (1).

    Inputs:
    - logits_fake: Tensor of shape (N,) of scores for fake data.
    Returns:
    - scalar Tensor with the generator loss.
    """
    real_targets = torch.ones_like(logits_fake)
    return bce_loss(logits_fake, real_targets)
def get_optimizer(model):
    """Adam optimizer for *model* with lr=1e-3, beta1=0.5, beta2=0.999.

    Input:
    - model: the PyTorch model to optimize.
    Returns:
    - a configured ``torch.optim.Adam`` instance.
    """
    return optim.Adam(model.parameters(), lr=1e-3, betas=(0.5, 0.999))
def run_a_gan(D,
              G,
              D_solver,
              G_solver,
              discriminator_loss,
              generator_loss,
              show_every=250,
              batch_size=128,
              noise_size=96,
              num_epochs=10):
    """
    Train a GAN!
    Inputs:
    - D, G: PyTorch models for the discriminator and generator
    - D_solver, G_solver: torch.optim Optimizers to use for training the
      discriminator and generator.
    - discriminator_loss, generator_loss: Functions to use for computing the generator and
      discriminator loss, respectively.
    - show_every: Show samples after every show_every iterations.
    - batch_size: Batch size to use for training.
    - noise_size: Dimension of the noise to use as input to the generator.
    - num_epochs: Number of epochs over the training dataset to use for training.
    """
    # NOTE(review): this reads the module-level globals `loader_train` and
    # `dtype` instead of taking them as parameters — they must exist before
    # this is called.
    iter_count = 0
    for epoch in range(num_epochs):
        for x, _ in loader_train:
            # drop the final, smaller batch so tensor shapes stay fixed
            if len(x) != batch_size:
                continue
            # --- discriminator step ---
            D_solver.zero_grad()
            real_data = x.type(dtype)
            logits_real = D(2 * (real_data - 0.5)).type(dtype)  # rescale [0,1] -> [-1,1]
            g_fake_seed = sample_noise(batch_size, noise_size).type(dtype)
            fake_images = G(g_fake_seed).detach()  # detach: no G gradients during the D step
            logits_fake = D(fake_images.view(batch_size, 1, 28, 28))
            d_total_error = discriminator_loss(logits_real, logits_fake)
            d_total_error.backward()
            D_solver.step()
            # --- generator step (fresh noise, gradients flow through G) ---
            G_solver.zero_grad()
            g_fake_seed = sample_noise(batch_size, noise_size).type(dtype)
            fake_images = G(g_fake_seed)
            gen_logits_fake = D(fake_images.view(batch_size, 1, 28, 28))
            g_error = generator_loss(gen_logits_fake)
            g_error.backward()
            G_solver.step()
            # periodically report losses and show a few generated samples
            if (iter_count % show_every == 0):
                print('Iter: {}, D: {:.4}, G:{:.4}'.format(
                    iter_count, d_total_error.item(), g_error.item()))
                imgs_numpy = fake_images.data.cpu().numpy()
                show_images(imgs_numpy[0:16])
                plt.show()
                print()
            iter_count += 1
def ls_discriminator_loss(scores_real, scores_fake):
    """
    Compute the Least-Squares GAN loss for the discriminator.

    Inputs:
    - scores_real: PyTorch Tensor of shape (N,) giving scores for the real data.
    - scores_fake: PyTorch Tensor of shape (N,) giving scores for the fake data.
    Returns:
    - loss: A PyTorch Tensor containing the (scalar) loss.
    """
    # BUG FIX: the old code wrapped (scores_real - 1) in torch.Tensor(...),
    # which constructs a brand-new leaf tensor and silently detaches that
    # term from the autograd graph — no gradient could flow back through
    # the real scores. Plain tensor arithmetic keeps the graph intact.
    loss = 0.5 * ((scores_real - 1) ** 2 + scores_fake ** 2)
    return loss.mean()
def ls_generator_loss(scores_fake):
    """
    Compute the Least-Squares GAN loss for the generator.

    Inputs:
    - scores_fake: PyTorch Tensor of shape (N,) giving scores for the fake data.
    Returns:
    - loss: A PyTorch Tensor containing the (scalar) loss.
    """
    # BUG FIX: as in ls_discriminator_loss, the old torch.Tensor(...) wrapper
    # detached the expression from the autograd graph, so the generator
    # received no gradient. Operate on the tensor directly.
    loss = 0.5 * (scores_fake - 1) ** 2
    return loss.mean()
def build_dc_classifier():
    """DCGAN discriminator for flattened 28x28 MNIST batches of size 128.

    Two conv + leaky-ReLU + max-pool stages reduce the image to 4x4x64,
    followed by a fully connected head producing a single logit.
    """
    return nn.Sequential(
        Unflatten(128, 1, 28, 28),
        nn.Conv2d(1, 32, kernel_size=5, stride=1),
        nn.LeakyReLU(0.01),
        nn.MaxPool2d(kernel_size=2, stride=2),
        nn.Conv2d(32, 64, kernel_size=5, stride=1),
        nn.LeakyReLU(0.01),
        nn.MaxPool2d(kernel_size=2, stride=2),
        Flatten(),
        nn.Linear(64 * 4 * 4, 4 * 4 * 64),
        nn.LeakyReLU(0.01),
        nn.Linear(4 * 4 * 64, 1),
    )
def build_dc_generator(noise_dim=96):
    """DCGAN generator: project noise to a 7x7x128 map, then upsample twice.

    Two stride-2 transposed convolutions bring the map to 28x28x1; the
    output is tanh-squashed and flattened to (N, 784).
    """
    return nn.Sequential(
        nn.Linear(noise_dim, 1024),
        nn.ReLU(),
        nn.BatchNorm1d(1024),
        nn.Linear(1024, 128 * 7 * 7),
        nn.ReLU(),
        nn.BatchNorm1d(128 * 7 * 7),
        Unflatten(-1, 128, 7, 7),
        nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1),
        nn.ReLU(),
        nn.BatchNorm2d(64),
        nn.ConvTranspose2d(64, 1, kernel_size=4, stride=2, padding=1),
        nn.Tanh(),
        Flatten(),
    )
if __name__ == "__main__":
    # Reference values for the loss/shape sanity checks below.
    answers = dict(np.load('gan-checks-tf.npz'))
    NUM_TRAIN = 50000
    NUM_VAL = 5000
    NOISE_DIM = 96
    batch_size = 128
    # MNIST train/val loaders; ChunkSampler splits one download into both sets.
    mnist_train = dset.MNIST('./datasets/MNIST_data',
                             train=True,
                             download=True,
                             transform=T.ToTensor())
    loader_train = DataLoader(mnist_train,
                              batch_size=batch_size,
                              sampler=ChunkSampler(NUM_TRAIN, 0))
    mnist_val = dset.MNIST('./datasets/MNIST_data',
                           train=True,
                           download=True,
                           transform=T.ToTensor())
    loader_val = DataLoader(mnist_val,
                            batch_size=batch_size,
                            sampler=ChunkSampler(NUM_VAL, NUM_TRAIN))
    # NOTE(review): `.__iter__().next()` is the Python 2 iterator protocol;
    # under Python 3 this line would need `next(iter(loader_train))`.
    imgs = loader_train.__iter__().next()[0].view(batch_size,
                                                  784).numpy().squeeze()
    # show_images(imgs)
    # --- sample_noise sanity checks ---
    batch_size = 3
    dim = 4
    torch.manual_seed(231)
    z = sample_noise(batch_size, dim)
    np_z = z.cpu().numpy()
    assert np_z.shape == (batch_size, dim)
    assert torch.is_tensor(z)
    assert np.all(np_z >= -1.0) and np.all(np_z <= 1.0)
    assert np.any(np_z < 0.0) and np.any(np_z > 0.0)
    print('All tests passed!')
    # --- parameter-count checks for the fully connected D and G ---
    model = discriminator()
    true_count = 267009
    cur_count = count_params(model)
    if cur_count != true_count:
        print(
            'Incorrect number of parameters in discriminator. Check your achitecture.'
        )
    else:
        print('Correct number of parameters in discriminator.')
    model = generator(4)
    true_count = 1858320
    cur_count = count_params(model)
    if cur_count != true_count:
        print(
            'Incorrect number of parameters in generator. Check your achitecture.'
        )
    else:
        print('Correct number of parameters in generator.')
    # --- vanilla GAN loss checks against the reference answers ---
    d_loss = discriminator_loss(
        torch.Tensor(answers['logits_real']).type(dtype),
        torch.Tensor(answers['logits_fake']).type(dtype)).cpu().numpy()
    print("Maximum error in d_loss: %g" %
          rel_error(answers['d_loss_true'], d_loss))
    g_loss = generator_loss(torch.Tensor(
        answers['logits_fake']).type(dtype)).cpu().numpy()
    print("Maximum error in g_loss: %g" %
          rel_error(answers['g_loss_true'], g_loss))
    # # Make the discriminator
    # D = discriminator().type(dtype)
    # # Make the generator
    # G = generator().type(dtype)
    # # Use the function you wrote earlier to get optimizers for the Discriminator and the Generator
    # D_solver = get_optimizer(D)
    # G_solver = get_optimizer(G)
    # # Run it!
    # run_a_gan(D, G, D_solver, G_solver, discriminator_loss, generator_loss)
    # --- least-squares GAN loss checks ---
    # NOTE(review): `ls_discriminator_loss`/`ls_generator_loss` are called
    # here with raw numpy arrays from `answers`, not tensors — confirm the
    # loss functions accept that at this call site.
    score_real = torch.Tensor(answers['logits_real']).type(dtype)
    score_fake = torch.Tensor(answers['logits_fake']).type(dtype)
    d_loss = ls_discriminator_loss(answers['logits_real'],
                                   score_fake).cpu().numpy()
    g_loss = ls_generator_loss(answers['logits_fake']).cpu().numpy()
    print("Maximum error in d_loss: %g" %
          rel_error(answers['d_loss_lsgan_true'], d_loss))
    print("Maximum error in g_loss: %g" %
          rel_error(answers['g_loss_lsgan_true'], g_loss))
    # D_LS = discriminator().type(dtype)
    # G_LS = generator().type(dtype)
    # D_LS_solver = get_optimizer(D_LS)
    # G_LS_solver = get_optimizer(G_LS)
    # run_a_gan(D_LS, G_LS, D_LS_solver, G_LS_solver, ls_discriminator_loss, ls_generator_loss)
    # --- DCGAN shape and parameter-count checks ---
    data = next(enumerate(loader_train))[-1][0].type(dtype)
    print(data.size())
    b = build_dc_classifier().type(dtype)
    out = b(data)
    print(out.size())
    true_count = 1102721
    model = build_dc_classifier()
    cur_count = count_params(model)
    if cur_count != true_count:
        print(
            'Incorrect number of parameters in generator. Check your achitecture.'
        )
    else:
        print('Correct number of parameters in generator.')
    test_g_gan = build_dc_generator().type(dtype)
    test_g_gan.apply(initialize_weights)
    fake_seed = torch.randn(batch_size, NOISE_DIM).type(dtype)
    fake_images = test_g_gan.forward(fake_seed)
    fake_images.size()
    model = build_dc_generator(4)
    true_count=6580801
    cur_count = count_params(model)
    if cur_count != true_count:
        print('Incorrect number of parameters in generator. Check your achitecture.')
    else:
        print('Correct number of parameters in generator.')
    # --- train the DCGAN with the vanilla GAN losses ---
    D_DC = build_dc_classifier().type(dtype)
    D_DC.apply(initialize_weights)
    G_DC = build_dc_generator().type(dtype)
    G_DC.apply(initialize_weights)
    D_DC_solver = get_optimizer(D_DC)
    G_DC_solver = get_optimizer(G_DC)
    run_a_gan(D_DC, G_DC, D_DC_solver, G_DC_solver, discriminator_loss, generator_loss, num_epochs=5)
"837950571@qq.com"
] | 837950571@qq.com |
d5456408727a5ab95b58366a7abfc66028440313 | 389be0f88fb2d71475b12963ca5e21d0b1c1f7fb | /src/drem/transform/ber_publicsearch.py | b98564cf278413314c503ef5a846504bd472e5a6 | [
"MIT"
] | permissive | codema-dev/drem | 7099c1cb1eef623e274dd9b6920b265df768173d | b7e4a47c3ae10040d6ea22ac4fa726d07d7ef508 | refs/heads/master | 2023-02-16T23:05:50.148136 | 2021-01-13T12:46:01 | 2021-01-13T12:46:01 | 281,381,878 | 4 | 5 | MIT | 2021-01-13T12:46:03 | 2020-07-21T11:43:30 | Python | UTF-8 | Python | false | false | 2,760 | py | from pathlib import Path
from typing import Any
import numpy as np
import pandas as pd
from prefect import Flow
from prefect import Parameter
from prefect import Task
from prefect import task
from prefect.utilities.debug import raise_on_exception
import drem.utilities.dask_dataframe_tasks as ddt
import drem.utilities.pandas_tasks as pdt
from drem.utilities.visualize import VisualizeMixin
@task
def _bin_year_of_construction_as_in_census(
    ber: pd.DataFrame, target: str, result: str,
) -> pd.DataFrame:
    """Bin construction years into the CSO census "period built" labels.

    Reads the year column *target* (NaNs treated as 0), writes the label
    column *result* on a copy of *ber*, and returns that copy. Conditions
    are evaluated in order, so each label covers one census period; an
    unmatched year yields "ERROR".
    """
    binned = ber.copy()
    years = binned[target].fillna(0).astype(int).to_numpy()
    periods = [
        (years <= 1919, "before 1919"),
        (years < 1946, "1919 - 1945"),
        (years < 1961, "1946 - 1960"),
        (years < 1971, "1961 - 1970"),
        (years < 1981, "1971 - 1980"),
        (years < 1991, "1981 - 1990"),
        (years < 2001, "1991 - 2000"),
        (years < 2010, "2001 - 2010"),
        (years < 2025, "2011 or later"),
        (years == 0, "not stated"),
    ]
    conditions = [condition for condition, _ in periods]
    choices = [label for _, label in periods]
    binned.loc[:, result] = np.select(conditions, choices, default="ERROR")
    return binned
# Prefect flow wiring: each call below registers a task in `flow`; nothing
# executes until flow.run(...) is called (see TransformBERPublicsearch.run).
with Flow("Cleaning the BER Data...") as flow:
    ber_fpath = Parameter("ber_fpath")
    raw_ber = ddt.read_parquet(ber_fpath)
    # keep only rows whose CountyName mentions Dublin
    get_dublin_rows = pdt.get_rows_where_column_contains_substring(
        raw_ber, target="CountyName", substring="Dublin",
    )
    # materialise the dask dataframe into pandas
    raw_dublin_ber = ddt.compute(get_dublin_rows)
    rename_postcodes = pdt.rename(raw_dublin_ber, columns={"CountyName": "postcodes"})
    # add the census-style "cso_period_built" label column
    bin_year_built_into_census_categories = _bin_year_of_construction_as_in_census(
        rename_postcodes, target="Year_of_Construction", result="cso_period_built",
    )
class TransformBERPublicsearch(Task, VisualizeMixin):
    """Clean BER Data in a Prefect flow.

    Wraps the module-level ``flow`` so the cleaning pipeline can itself be
    used as a task inside a larger Prefect flow.

    Args:
        Task (prefect.Task): see https://docs.prefect.io/core/concepts/tasks.html
        VisualizeMixin (object): Mixin to add flow visualization method
    """
    def __init__(self, **kwargs: Any):
        """Initialise Task.

        Args:
            **kwargs (Any): see https://docs.prefect.io/core/concepts/tasks.html
        """
        # bind the module-level flow built above
        self.flow = flow
        super().__init__(**kwargs)
    def run(self, input_filepath: Path, output_filepath: Path) -> None:
        """Run flow and persist the cleaned data as parquet.

        Args:
            input_filepath (Path): Path to input data
            output_filepath (Path): Path to output data
        """
        with raise_on_exception():
            state = self.flow.run(ber_fpath=input_filepath)
        # pull the final task's result out of the flow state
        result = state.result[bin_year_built_into_census_categories].result
        result.to_parquet(output_filepath)
# module-level singleton used by the pipeline
transform_ber_publicsearch = TransformBERPublicsearch()
| [
"noreply@github.com"
] | noreply@github.com |
02a33085d35c70f94f68dab669344b797c917898 | 871ceeae4a3e8575f2f4d46d5cec6398d5571884 | /py3.cgi | f53ebc0a48c0dfc03e075df2f7eede0b4147259e | [] | no_license | AhmadAlberkawi/Learntech | b27eb0a4cc8cb8ac819ebf789cc8eb94dd98c4c2 | 309181c8f71206a7129fccebe50a91febfc78f21 | refs/heads/master | 2023-05-27T06:44:34.615082 | 2020-07-13T12:58:51 | 2020-07-13T12:58:51 | 199,425,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 730 | cgi | #!/bin/python
# CGI script: echoes the submitted registration form fields back as HTML.
import cgi, cgitb

# Show tracebacks in the browser on errors (debugging aid).
cgitb.enable()

# BUG FIX: this line was commented out, so every form.getvalue(...) call
# below raised a NameError. Parse the submitted form data here.
form = cgi.FieldStorage()

# Individual form fields (None when a field was not submitted).
name = form.getvalue('Name')
vorname = form.getvalue('Vorname')
email = form.getvalue('Email')

# Emit the HTTP content-type header, then the HTML document.
print ("content-type: text/html\n")
print ("<!DOCTYPE html>")
print ('<html lang="de">')
print ("<head>")
print ("<title>Dateneingabe</title>")
print ('<link rel="stylesheet" href="stylesheet.css">')
print ("</head>")
print ("<body>")
print ("<p><b>Registrierte Daten:</b></p>")
print ("<p>Nachname:", name, "</p>")
print ("<p>Vorname:", vorname, "</p>")
print ("<p>Email:", email, "</p>")
print ("</body>")
print ("</html>")
| [
"ahmads-96@hotmail.com"
] | ahmads-96@hotmail.com |
609168f5863476adca41f1821aa9e52272089d4d | 5fef7e0095b57a80ef3672945ac5438db22fcb1f | /AimOffer/SerializeBinaryTree.py | d201f75a031f4a4698c76551af22a379236cf03e | [] | no_license | wbq9224/Leetcode_Python | 0feb8b509ce3ab194fc10a52ed5c90b8bfe12d92 | c1f27c0cec80585095ce98a678ab85079e1a4c46 | refs/heads/master | 2020-03-23T15:02:21.860337 | 2018-09-16T08:48:51 | 2018-09-16T08:48:51 | 141,716,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,035 | py | from AimOffer.ConstructBinaryTree import *
class Solution:
    """Pre-order (de)serialization of a binary tree; '$' marks a null child."""

    def Serialize(self, root):
        """Return the tree as a flat pre-order list; None becomes ['$']."""
        if root is None:
            return ['$']
        serialized = [root.value]
        serialized += self.Serialize(root.left)
        serialized += self.Serialize(root.right)
        return serialized

    def Deserialize_core(self, s, index):
        """Rebuild the subtree rooted at s[index]; return (node, last index consumed)."""
        if index >= len(s) or s[index] == '$':
            return None, index
        node = TreeNode(s[index], None, None)
        node.left, index = self.Deserialize_core(s, index + 1)
        node.right, index = self.Deserialize_core(s, index + 1)
        return node, index

    def Deserialize(self, s):
        """Inverse of Serialize: rebuild the whole tree from its pre-order list."""
        return self.Deserialize_core(s, 0)[0]
if __name__ == '__main__':
    # Round-trip check: build a tree from its pre-order/in-order traversals,
    # serialize it, rebuild it, and print the post-order traversal.
    pre = [1, 2, 4, 3, 5, 6]
    in_o = [4, 2, 1, 5, 3, 6]
    root = construct(pre, in_o, 0, len(pre) - 1, 0, len(in_o) - 1)
    str = Solution().Serialize(root)  # NOTE(review): shadows the builtin `str`
    print(str)
    root = Solution().Deserialize(str)
    post = []
    post_order_travl(root, post)
    print(post)
| [
"ziwuchui110w@163.com"
] | ziwuchui110w@163.com |
def initList():
    """Return a fresh 10x10 matrix of zeros with independent rows."""
    return [[0] * 10 for _ in range(10)]
def solve():
    """Project Euler 178: count 40-digit step numbers that are pandigital.

    Dynamic programming over path length: after n steps, table[i][j] counts
    digit strings of length n+1 that start at 0, change by +-1 per digit,
    and whose minimum is -i / maximum is +j relative to the start.
    """
    def fresh_table():
        # inlined helper (formerly module-level initList): 10x10 zero matrix
        return [[0] * 10 for _ in range(10)]

    table = fresh_table()
    table[0][0] = 1
    for _ in range(1, 40):
        updated = fresh_table()
        for i in range(0, 10):
            for j in range(0, 9):
                # extend each path by one step, up or down, clamping the
                # recorded extremes
                updated[max(0, i - 1)][j + 1] += table[i][j]
                updated[j + 1][max(0, i - 1)] += table[j][i]
        table = updated

    total = 0
    for i in range(1, 10):
        # pandigital means the path spans exactly digits 0..9 (i + j == 9);
        # paths beginning with a leading 0 were already excluded above
        total += table[i][9 - i]
    return total
if __name__ == "__main__":
    # NOTE(review): Python 2 print statement — this file targets Python 2.
    result = solve()
    print "Result: %d" % result
| [
"regis.dupont+git@m4x.org"
] | regis.dupont+git@m4x.org |
d0f7ae8b7499a9ca59ab3835244c320159fe0290 | d6589ff7cf647af56938a9598f9e2e674c0ae6b5 | /imagesearch-20201214/setup.py | 3e51f0f9157b6734ebf9de9021339da732085c83 | [
"Apache-2.0"
] | permissive | hazho/alibabacloud-python-sdk | 55028a0605b1509941269867a043f8408fa8c296 | cddd32154bb8c12e50772fec55429a9a97f3efd9 | refs/heads/master | 2023-07-01T17:51:57.893326 | 2021-08-02T08:55:22 | 2021-08-02T08:55:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,885 | py | # -*- coding: utf-8 -*-
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import os
from setuptools import setup, find_packages
"""
setup module for alibabacloud_imagesearch20201214.
Created on 20/05/2021
@author: Alibaba Cloud SDK
"""
# Distribution metadata: PACKAGE is the importable package directory,
# NAME is the PyPI project name.
PACKAGE = "alibabacloud_imagesearch20201214"
# NOTE(review): the `or "alibabacloud-package"` fallback is dead code — the
# left-hand string literal is always truthy.
NAME = "alibabacloud_imagesearch20201214" or "alibabacloud-package"
DESCRIPTION = "Alibaba Cloud image search (20201214) SDK Library for Python"
AUTHOR = "Alibaba Cloud SDK"
AUTHOR_EMAIL = "sdk-team@alibabacloud.com"
URL = "https://github.com/aliyun/alibabacloud-python-sdk"
# Version is read from the package's __version__ attribute at build time.
VERSION = __import__(PACKAGE).__version__
# Runtime dependencies, pinned to compatible ranges.
REQUIRES = [
    "alibabacloud_tea_util>=0.3.3, <1.0.0",
    "alibabacloud_oss_sdk>=0.1.0, <1.0.0",
    "alibabacloud_tea_rpc>=0.1.0, <1.0.0",
    "alibabacloud_openplatform20191219>=1.1.1, <2.0.0",
    "alibabacloud_oss_util>=0.0.5, <1.0.0",
    "alibabacloud_tea_fileform>=0.0.3, <1.0.0",
    "alibabacloud_tea_openapi>=0.2.4, <1.0.0",
    "alibabacloud_openapi_util>=0.1.4, <1.0.0",
    "alibabacloud_endpoint_util>=0.0.3, <1.0.0"
]
LONG_DESCRIPTION = ''
# Use README.md as the long description when it exists alongside setup.py.
if os.path.exists('./README.md'):
    with open("README.md", encoding='utf-8') as fp:
        LONG_DESCRIPTION = fp.read()
setup(
    name=NAME,
    version=VERSION,
    description=DESCRIPTION,
    long_description=LONG_DESCRIPTION,
    long_description_content_type='text/markdown',
    author=AUTHOR,
    author_email=AUTHOR_EMAIL,
    license="Apache License 2.0",
    url=URL,
    keywords=["alibabacloud","imagesearch20201214"],
    packages=find_packages(exclude=["tests*"]),
    include_package_data=True,
    platforms="any",
    install_requires=REQUIRES,
    python_requires=">=3.6",
    # NOTE(review): setuptools documents `classifiers` as a list; a tuple
    # happens to work but a list would match convention.
    classifiers=(
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        "Topic :: Software Development"
    )
)
| [
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
5dd5a8300a0fca9f4b6b90e21a384d96b58795da | 6965dad35168a486fd824b9a436a7de066eab37a | /test/cal_len.py | e98f32537c6b62f7594b31b7d4664101e31c46f9 | [] | no_license | baoyi33/prac_1 | 7892acca16c89eba162448e8ae4cb9fabf9d7a78 | e7fa0f01f42367a84292c34dd4785144dbe18aee | refs/heads/master | 2020-04-01T22:46:44.178294 | 2018-10-24T21:54:14 | 2018-10-24T21:54:14 | 153,725,969 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | #!/user/bin/python
# Sum the total sequence length in a FASTA file, ignoring header lines.
with open("test.fa") as fasta:  # open test.fa file
    length = 0
    for line in fasta:
        line = line.rstrip()  # strip the trailing newline
        # BUG FIX: the old code skipped only the very first line, so the
        # '>' header of any later record was counted into the length.
        # Skip every header line instead. (The per-line debug print was
        # removed as well.)
        if line.startswith(">"):
            continue
        length = length + len(line)
    print(length)
| [
"baoyi33k@gmail.com"
] | baoyi33k@gmail.com |
bc7cfbe5d9a28ea6fa5e9f3faf22fbafcb249bd5 | 6472434d4e6d046200b1d5b498db49593806b485 | /jdzc.py | a89db5d8652d9388f60d5527e4141f79f690726d | [] | no_license | shoaly/2rss | a9a70a66f38d9b43f141d9d0bb7eaf4062e0c72f | cfe4a51e365513340bec85d75c5298c765c2c7d0 | HEAD | 2016-09-05T12:36:42.994486 | 2015-02-06T12:26:18 | 2015-02-06T12:26:18 | 29,582,728 | 9 | 3 | null | null | null | null | UTF-8 | Python | false | false | 3,306 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import urllib2
import pystache
import requests
import re
import time
import os
from datetime import *
from pyquery import PyQuery as pq
from lxml import etree
# print pystache.render('Hi {{person}}!', {'person': 'Mom'})
class RSS:
    """Scrape a web page and render the extracted items into an RSS XML file.

    Fetches self.url (GET or POST), collects item links via PyQuery,
    downloads each item's full article body, and renders everything through
    a pystache (Mustache) template into self.output_file. Python 2 code.
    """
    def __init__(self,url,output_file,tpl,encode_code="utf-8",rss_title="unnamed rss",method="get",params={}):
        # NOTE(review): mutable default `params={}` is shared across calls;
        # harmless while every caller passes params explicitly.
        self.path = os.path.dirname(os.path.abspath(__file__))
        self.encode_code = encode_code
        self.url = url
        self.rss_title = rss_title
        self.output_file = output_file
        self.tpl = tpl
        self.params = params
        self.method = method
        # print self.path
    def generate_rss(self,data):
        # Render the mustache template with the item list plus feed metadata.
        # NOTE(review): mutates the caller's `data` dict in place.
        with open(self.path+"/"+self.tpl,"r") as file:
            rss_tpl = file.read()
        renderer = pystache.Renderer(file_encoding="utf-8",string_encoding="utf-8")
        data['rss_title'] = self.rss_title
        data['source_url'] = self.url
        rss = pystache.render(rss_tpl, data)
        return rss
    def write_to_file(self,content):
        # Persist the rendered feed as UTF-8 next to this script.
        print "write to " + self.output_file
        with open(self.path+'/'+self.output_file, 'w') as xml_file:
            # print repr(content)
            xml_file.write(content.encode("utf-8"))
    # returns unicode
    def fetch_web_page(self):
        # Fetch self.url with the configured HTTP method and decode the body.
        print self.method + ": " + self.url
        if self.method == "get":
            response = requests.get(self.url)
            content = response.content
            return content.decode(self.encode_code)
        else :
            response = requests.post(self.url,self.params)
            content = response.content
            return content.decode(self.encode_code)
    # Fetch an item's full article content; returns unicode.
    def load_item_full_content(self,link):
        response = requests.get(link)
        response = response.content
        response = response.decode(self.encode_code)
        jQuery = pq(response)
        content = jQuery('.mb30')
        # print response.decode("gbk").encode("utf-8")
        content = jQuery(content[0]).html()
        # Strip mojibake like:
        # <?xml:namespace prefix = o ns = "urn:schemas-microsoft-com:office:office" /?>
        # content = re.sub(re.compile("<\?xml.*?/\?>"),'',content)
        return content
    def filter_web_page(self):
        # Extract every item link from the listing page and download each
        # item's full description.
        page = self.fetch_web_page()
        jQuery = pq(page)
        source_item = jQuery(".q-title a")
        items = []
        for key,row in enumerate(source_item):
            title = jQuery(row).text()
            link = jQuery(row).attr("href")
            description = ""
            description = self.load_item_full_content(link)
            items.append({"title":title,"link":link,"description":description})
            print "done: " + link
            # break
        return items
return items
# Build the JD crowdfunding feed: POST the search form, scrape the items,
# render them through tpl.py, and write jdzc.xml.
rss = RSS(encode_code="utf-8",url="http://z.jd.com/search.html",rss_title=u"京东众筹 Rss",output_file="jdzc.xml",tpl="tpl.py",method="post",params={"parentIdHidden":10,"pageNoHidden":1,"sortHidden":"zhtj","wHidden":"关键字查找"})
items = rss.filter_web_page();
# print items
rss_content = rss.generate_rss({"items":items,"lastBuildDate":datetime.today()})
rss.write_to_file(rss_content)
| [
"shoaly@qq.com"
] | shoaly@qq.com |
88dfb09583a57d41a21b54043ae54aaf5acc50da | 1bf9f6b0ef85b6ccad8cb029703f89039f74cedc | /src/spring/azext_spring/vendored_sdks/appplatform/v2022_11_01_preview/operations/_buildpack_binding_operations.py | fa960c96a5d386a8bcb2472976a7bf735c0cfe36 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | VSChina/azure-cli-extensions | a1f4bf2ea4dc1b507618617e299263ad45213add | 10b7bfef62cb080c74b1d59aadc4286bd9406841 | refs/heads/master | 2022-11-14T03:40:26.009692 | 2022-11-09T01:09:53 | 2022-11-09T01:09:53 | 199,810,654 | 4 | 2 | MIT | 2020-07-13T05:51:27 | 2019-07-31T08:10:50 | Python | UTF-8 | Python | false | false | 37,434 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
# Generic response-hook typing helpers used by the operation methods.
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
# Shared serializer for URL/query/header parameters; client-side validation
# is disabled because the service validates inputs itself.
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
    resource_group_name: str,
    service_name: str,
    build_service_name: str,
    builder_name: str,
    buildpack_binding_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP GET request for a buildpack-binding resource.

    Auto-generated request builder: serializes the path arguments into the
    ARM resource URL and attaches the api-version query parameter.
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-11-01-preview"))  # type: str
    accept = _headers.pop("Accept", "application/json")
    # Construct URL
    _url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/builders/{builderName}/buildpackBindings/{buildpackBindingName}",
    )  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "serviceName": _SERIALIZER.url("service_name", service_name, "str"),
        "buildServiceName": _SERIALIZER.url("build_service_name", build_service_name, "str"),
        "builderName": _SERIALIZER.url("builder_name", builder_name, "str"),
        "buildpackBindingName": _SERIALIZER.url("buildpack_binding_name", buildpack_binding_name, "str"),
    }
    _url = _format_url_section(_url, **path_format_arguments)
    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_create_or_update_request(
    resource_group_name: str,
    service_name: str,
    build_service_name: str,
    builder_name: str,
    buildpack_binding_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP PUT request that creates or updates a buildpack binding.

    The request body itself is attached by the caller (via ``json``/``content``
    kwargs); this helper only assembles URL, query string and headers.
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-11-01-preview")) # type: str
    content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
    accept = _headers.pop("Accept", "application/json")
    # Construct URL
    _url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/builders/{builderName}/buildpackBindings/{buildpackBindingName}",
    ) # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "serviceName": _SERIALIZER.url("service_name", service_name, "str"),
        "buildServiceName": _SERIALIZER.url("build_service_name", build_service_name, "str"),
        "builderName": _SERIALIZER.url("builder_name", builder_name, "str"),
        "buildpackBindingName": _SERIALIZER.url("buildpack_binding_name", buildpack_binding_name, "str"),
    }
    _url = _format_url_section(_url, **path_format_arguments)
    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    # Construct headers
    # Content-Type is only sent when a body content type was provided.
    if content_type is not None:
        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
def build_delete_request(
    resource_group_name: str,
    service_name: str,
    build_service_name: str,
    builder_name: str,
    buildpack_binding_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP DELETE request for a buildpack binding."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-11-01-preview")) # type: str
    accept = _headers.pop("Accept", "application/json")
    # Construct URL
    _url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/builders/{builderName}/buildpackBindings/{buildpackBindingName}",
    ) # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "serviceName": _SERIALIZER.url("service_name", service_name, "str"),
        "buildServiceName": _SERIALIZER.url("build_service_name", build_service_name, "str"),
        "builderName": _SERIALIZER.url("builder_name", builder_name, "str"),
        "buildpackBindingName": _SERIALIZER.url("buildpack_binding_name", buildpack_binding_name, "str"),
    }
    _url = _format_url_section(_url, **path_format_arguments)
    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_request(
    resource_group_name: str,
    service_name: str,
    build_service_name: str,
    builder_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP GET request listing all buildpack bindings of a builder."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-11-01-preview")) # type: str
    accept = _headers.pop("Accept", "application/json")
    # Construct URL
    _url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/builders/{builderName}/buildpackBindings",
    ) # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "serviceName": _SERIALIZER.url("service_name", service_name, "str"),
        "buildServiceName": _SERIALIZER.url("build_service_name", build_service_name, "str"),
        "builderName": _SERIALIZER.url("builder_name", builder_name, "str"),
    }
    _url = _format_url_section(_url, **path_format_arguments)
    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class BuildpackBindingOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.
        Instead, you should access the following operations through
        :class:`~azure.mgmt.appplatform.v2022_11_01_preview.AppPlatformManagementClient`'s
        :attr:`buildpack_binding` attribute.
    """
    # NOTE(review): this class follows the autorest-generated operations-class
    # pattern; if it is generated, prefer regenerating over hand edits.
    models = _models
    def __init__(self, *args, **kwargs):
        # The management client injects the pipeline client, configuration and
        # (de)serializers either positionally or by keyword; accept both.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    @distributed_trace
    def get(
        self,
        resource_group_name: str,
        service_name: str,
        build_service_name: str,
        builder_name: str,
        buildpack_binding_name: str,
        **kwargs: Any
    ) -> _models.BuildpackBindingResource:
        """Get a buildpack binding by name.

        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal. Required.
        :type resource_group_name: str
        :param service_name: The name of the Service resource. Required.
        :type service_name: str
        :param build_service_name: The name of the build service resource. Required.
        :type build_service_name: str
        :param builder_name: The name of the builder resource. Required.
        :type builder_name: str
        :param buildpack_binding_name: The name of the Buildpack Binding Name. Required.
        :type buildpack_binding_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: BuildpackBindingResource or the result of cls(response)
        :rtype: ~azure.mgmt.appplatform.v2022_11_01_preview.models.BuildpackBindingResource
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-11-01-preview")) # type: str
        cls = kwargs.pop("cls", None) # type: ClsType[_models.BuildpackBindingResource]
        request = build_get_request(
            resource_group_name=resource_group_name,
            service_name=service_name,
            build_service_name=build_service_name,
            builder_name=builder_name,
            buildpack_binding_name=buildpack_binding_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.get.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url) # type: ignore
        pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize("BuildpackBindingResource", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/builders/{builderName}/buildpackBindings/{buildpackBindingName}"} # type: ignore
    def _create_or_update_initial(
        self,
        resource_group_name: str,
        service_name: str,
        build_service_name: str,
        builder_name: str,
        buildpack_binding_name: str,
        buildpack_binding: Union[_models.BuildpackBindingResource, IO],
        **kwargs: Any
    ) -> _models.BuildpackBindingResource:
        # Initial PUT of the long-running create/update operation; returns the
        # immediate (200 or 201) resource state for the poller to track.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-11-01-preview")) # type: str
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
        cls = kwargs.pop("cls", None) # type: ClsType[_models.BuildpackBindingResource]
        content_type = content_type or "application/json"
        _json = None
        _content = None
        # A raw stream/bytes body is passed through; a model is serialized.
        if isinstance(buildpack_binding, (IO, bytes)):
            _content = buildpack_binding
        else:
            _json = self._serialize.body(buildpack_binding, "BuildpackBindingResource")
        request = build_create_or_update_request(
            resource_group_name=resource_group_name,
            service_name=service_name,
            build_service_name=build_service_name,
            builder_name=builder_name,
            buildpack_binding_name=buildpack_binding_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self._create_or_update_initial.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url) # type: ignore
        pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if response.status_code == 200:
            deserialized = self._deserialize("BuildpackBindingResource", pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize("BuildpackBindingResource", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/builders/{builderName}/buildpackBindings/{buildpackBindingName}"} # type: ignore
    @overload
    def begin_create_or_update(
        self,
        resource_group_name: str,
        service_name: str,
        build_service_name: str,
        builder_name: str,
        buildpack_binding_name: str,
        buildpack_binding: _models.BuildpackBindingResource,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> LROPoller[_models.BuildpackBindingResource]:
        """Create or update a buildpack binding.

        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal. Required.
        :type resource_group_name: str
        :param service_name: The name of the Service resource. Required.
        :type service_name: str
        :param build_service_name: The name of the build service resource. Required.
        :type build_service_name: str
        :param builder_name: The name of the builder resource. Required.
        :type builder_name: str
        :param buildpack_binding_name: The name of the Buildpack Binding Name. Required.
        :type buildpack_binding_name: str
        :param buildpack_binding: The target buildpack binding for the create or update operation.
         Required.
        :type buildpack_binding:
         ~azure.mgmt.appplatform.v2022_11_01_preview.models.BuildpackBindingResource
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either BuildpackBindingResource or the result of
         cls(response)
        :rtype:
         ~azure.core.polling.LROPoller[~azure.mgmt.appplatform.v2022_11_01_preview.models.BuildpackBindingResource]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @overload
    def begin_create_or_update(
        self,
        resource_group_name: str,
        service_name: str,
        build_service_name: str,
        builder_name: str,
        buildpack_binding_name: str,
        buildpack_binding: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> LROPoller[_models.BuildpackBindingResource]:
        """Create or update a buildpack binding.

        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal. Required.
        :type resource_group_name: str
        :param service_name: The name of the Service resource. Required.
        :type service_name: str
        :param build_service_name: The name of the build service resource. Required.
        :type build_service_name: str
        :param builder_name: The name of the builder resource. Required.
        :type builder_name: str
        :param buildpack_binding_name: The name of the Buildpack Binding Name. Required.
        :type buildpack_binding_name: str
        :param buildpack_binding: The target buildpack binding for the create or update operation.
         Required.
        :type buildpack_binding: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either BuildpackBindingResource or the result of
         cls(response)
        :rtype:
         ~azure.core.polling.LROPoller[~azure.mgmt.appplatform.v2022_11_01_preview.models.BuildpackBindingResource]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @distributed_trace
    def begin_create_or_update(
        self,
        resource_group_name: str,
        service_name: str,
        build_service_name: str,
        builder_name: str,
        buildpack_binding_name: str,
        buildpack_binding: Union[_models.BuildpackBindingResource, IO],
        **kwargs: Any
    ) -> LROPoller[_models.BuildpackBindingResource]:
        """Create or update a buildpack binding.

        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal. Required.
        :type resource_group_name: str
        :param service_name: The name of the Service resource. Required.
        :type service_name: str
        :param build_service_name: The name of the build service resource. Required.
        :type build_service_name: str
        :param builder_name: The name of the builder resource. Required.
        :type builder_name: str
        :param buildpack_binding_name: The name of the Buildpack Binding Name. Required.
        :type buildpack_binding_name: str
        :param buildpack_binding: The target buildpack binding for the create or update operation. Is
         either a model type or a IO type. Required.
        :type buildpack_binding:
         ~azure.mgmt.appplatform.v2022_11_01_preview.models.BuildpackBindingResource or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either BuildpackBindingResource or the result of
         cls(response)
        :rtype:
         ~azure.core.polling.LROPoller[~azure.mgmt.appplatform.v2022_11_01_preview.models.BuildpackBindingResource]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-11-01-preview")) # type: str
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
        cls = kwargs.pop("cls", None) # type: ClsType[_models.BuildpackBindingResource]
        polling = kwargs.pop("polling", True) # type: Union[bool, PollingMethod]
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
        # Only issue the initial PUT when not resuming from a saved token.
        if cont_token is None:
            raw_result = self._create_or_update_initial( # type: ignore
                resource_group_name=resource_group_name,
                service_name=service_name,
                build_service_name=build_service_name,
                builder_name=builder_name,
                buildpack_binding_name=buildpack_binding_name,
                buildpack_binding=buildpack_binding,
                api_version=api_version,
                content_type=content_type,
                cls=lambda x, y, z: x,
                headers=_headers,
                params=_params,
                **kwargs
            )
        kwargs.pop("error_map", None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final polled response into the model type.
            deserialized = self._deserialize("BuildpackBindingResource", pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        # Select the polling strategy: default ARM polling, no polling, or a
        # caller-supplied PollingMethod instance.
        if polling is True:
            polling_method = cast(PollingMethod, ARMPolling(lro_delay, **kwargs)) # type: PollingMethod
        elif polling is False:
            polling_method = cast(PollingMethod, NoPolling())
        else:
            polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/builders/{builderName}/buildpackBindings/{buildpackBindingName}"} # type: ignore
    def _delete_initial( # pylint: disable=inconsistent-return-statements
        self,
        resource_group_name: str,
        service_name: str,
        build_service_name: str,
        builder_name: str,
        buildpack_binding_name: str,
        **kwargs: Any
    ) -> None:
        # Initial DELETE of the long-running operation; 200/202/204 accepted.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-11-01-preview")) # type: str
        cls = kwargs.pop("cls", None) # type: ClsType[None]
        request = build_delete_request(
            resource_group_name=resource_group_name,
            service_name=service_name,
            build_service_name=build_service_name,
            builder_name=builder_name,
            buildpack_binding_name=buildpack_binding_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self._delete_initial.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url) # type: ignore
        pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/builders/{builderName}/buildpackBindings/{buildpackBindingName}"} # type: ignore
    @distributed_trace
    def begin_delete(
        self,
        resource_group_name: str,
        service_name: str,
        build_service_name: str,
        builder_name: str,
        buildpack_binding_name: str,
        **kwargs: Any
    ) -> LROPoller[None]:
        """Operation to delete a Buildpack Binding.

        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal. Required.
        :type resource_group_name: str
        :param service_name: The name of the Service resource. Required.
        :type service_name: str
        :param build_service_name: The name of the build service resource. Required.
        :type build_service_name: str
        :param builder_name: The name of the builder resource. Required.
        :type builder_name: str
        :param buildpack_binding_name: The name of the Buildpack Binding Name. Required.
        :type buildpack_binding_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-11-01-preview")) # type: str
        cls = kwargs.pop("cls", None) # type: ClsType[None]
        polling = kwargs.pop("polling", True) # type: Union[bool, PollingMethod]
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
        # Only issue the initial DELETE when not resuming from a saved token.
        if cont_token is None:
            raw_result = self._delete_initial( # type: ignore
                resource_group_name=resource_group_name,
                service_name=service_name,
                build_service_name=build_service_name,
                builder_name=builder_name,
                buildpack_binding_name=buildpack_binding_name,
                api_version=api_version,
                cls=lambda x, y, z: x,
                headers=_headers,
                params=_params,
                **kwargs
            )
        kwargs.pop("error_map", None)
        def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
            if cls:
                return cls(pipeline_response, None, {})
        # Select the polling strategy (see begin_create_or_update).
        if polling is True:
            polling_method = cast(PollingMethod, ARMPolling(lro_delay, **kwargs)) # type: PollingMethod
        elif polling is False:
            polling_method = cast(PollingMethod, NoPolling())
        else:
            polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/builders/{builderName}/buildpackBindings/{buildpackBindingName}"} # type: ignore
    @distributed_trace
    def list(
        self, resource_group_name: str, service_name: str, build_service_name: str, builder_name: str, **kwargs: Any
    ) -> Iterable["_models.BuildpackBindingResource"]:
        """Handles requests to list all buildpack bindings in a builder.

        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal. Required.
        :type resource_group_name: str
        :param service_name: The name of the Service resource. Required.
        :type service_name: str
        :param build_service_name: The name of the build service resource. Required.
        :type build_service_name: str
        :param builder_name: The name of the builder resource. Required.
        :type builder_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either BuildpackBindingResource or the result of
         cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.appplatform.v2022_11_01_preview.models.BuildpackBindingResource]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-11-01-preview")) # type: str
        cls = kwargs.pop("cls", None) # type: ClsType[_models.BuildpackBindingResourceCollection]
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        def prepare_request(next_link=None):
            # First page uses the templated URL; follow-up pages reuse the
            # service-provided next_link with the client's api-version re-applied.
            if not next_link:
                request = build_list_request(
                    resource_group_name=resource_group_name,
                    service_name=service_name,
                    build_service_name=build_service_name,
                    builder_name=builder_name,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url) # type: ignore
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url) # type: ignore
                request.method = "GET"
            return request
        def extract_data(pipeline_response):
            # Split one page into (next_link, iterator-of-items) for ItemPaged.
            deserialized = self._deserialize("BuildpackBindingResourceCollection", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(get_next, extract_data)
    list.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/builders/{builderName}/buildpackBindings"} # type: ignore
| [
"noreply@github.com"
] | noreply@github.com |
5388ceee2a8d5ef63ab3028b840380fb21e77d35 | 565c458353f0ec256a0a29e11aa0e2f868875b8d | /tracking_test.py | 8eb858e4d5cbb10af0a249f944b885a799a167ae | [] | no_license | Robo-Sapien/Maze-Perilous-Real-Time-Implemention-of-Dijkstra-Algorithm | 089aff6eeedc51c263ffb8786cb9c94a65111eb7 | 235ae1f0d71beb08f1286938eb8ade59083a4129 | refs/heads/master | 2021-01-17T16:03:06.119636 | 2017-06-26T10:18:32 | 2017-06-26T10:18:32 | 95,433,998 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 916 | py | import numpy as np
import cv2
from matplotlib import pyplot as plt
# NOTE(review): Python 2 script (print statements) using the OpenCV 2.4 API —
# cv2.FastFeatureDetector() / getInt / setBool were replaced in OpenCV 3+ by
# cv2.FastFeatureDetector_create() and per-property accessors; confirm the
# intended OpenCV version before modernizing. `plt` is imported but unused.
# Load the test image in grayscale (flag 0).
img = cv2.imread('a4.jpg',0)
# Initiate FAST object with default values
fast = cv2.FastFeatureDetector()
# find and draw the keypoints
kp = fast.detect(img,None)
img2 = cv2.drawKeypoints(img, kp, color=(255,0,0))
# Print all default params
print "Threshold: ", fast.getInt('threshold')
print "nonmaxSuppression: ", fast.getBool('nonmaxSuppression')
print "Total Keypoints with nonmaxSuppression: ", len(kp)
cv2.imshow('fast_true',img2)
# Block until a key is pressed; the masked key code is discarded.
cv2.waitKey(0) & 0xFF
cv2.imwrite('fast_true.png',img2)
# Disable nonmaxSuppression
fast.setBool('nonmaxSuppression',0)
kp = fast.detect(img,None)
print "Total Keypoints without nonmaxSuppression: ", len(kp)
img3 = cv2.drawKeypoints(img, kp, color=(255,0,0))
cv2.imshow('fast_false',img3)
cv2.waitKey(0) & 0xFF
cv2.imwrite('fast_false.png',img3)
cv2.destroyAllWindows()
| [
"noreply@github.com"
] | noreply@github.com |
f7f98ad9e65f953b1a284837286cdbdd17a0cbf5 | 9d14c4f796e9d3e10ade17f41ff1d150295b8aa0 | /forms.py | fc4ceb261a7fb2f21f8c45b8add01277a998317c | [] | no_license | chitboon/flaskr2 | e6c2fc7c776fdfa73461c2f65bb093f66ec08db5 | 2834ec4923673d97d666fa2da09ee54711534178 | refs/heads/master | 2022-12-27T23:45:36.915733 | 2021-01-21T03:26:23 | 2021-01-21T03:26:23 | 160,546,330 | 1 | 2 | null | 2022-12-08T03:00:28 | 2018-12-05T16:23:25 | HTML | UTF-8 | Python | false | false | 662 | py | from wtforms import StringField, IntegerField, TextAreaField, SubmitField, RadioField, SelectField, Form, PasswordField
from wtforms import validators, ValidationError
class LoginForm(Form):
    """WTForms login form: user name + password, both required."""
    # `id` shadows the builtin, but renaming it would change the bound
    # field name used by templates/request data — kept as-is.
    id = StringField('UserName', [validators.DataRequired('Please enter your name.')])
    password = PasswordField('Password', [validators.DataRequired('Please enter your password.')])
    submit = SubmitField('Login')
class RegisterForm(Form):
    """WTForms registration form: same fields as LoginForm, 'Register' button."""
    # `id` shadows the builtin; kept for template/form-data compatibility.
    id = StringField('UserName', [validators.DataRequired('Please enter your name.')])
    password = PasswordField('Password', [validators.DataRequired('Please enter your password.')])
    submit = SubmitField('Register')
| [
"chitboon@gmail.com"
] | chitboon@gmail.com |
b1971ec4f2d4825a4be4ba3638ca9f7548e9f7d1 | 29d7fd60a06e241068a2217510231c4fadcb338d | /src/patch2021ti10.py | 5b82f99f741a2efe9736325ffd9f22466d5a7f71 | [] | no_license | csj2018/KID_simulator | e633990b8a6f5d29d6861ee15faa067284b1e266 | d165aea8cdfb9b6dc4b549dd2e7c3843c83ffeca | refs/heads/master | 2023-08-27T05:25:57.607943 | 2021-10-07T12:20:38 | 2021-10-07T12:20:38 | 414,289,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,928 | py | from src.support import *
# Clubs loaded from the TI10 patch spreadsheet; populated by the loop below.
club_list = []
# Spreadsheet layout: a marker row whose column 0 is '队名' (team name) holds
# the club name in column 1, and the five rows directly below it each hold
# one player's three attributes in columns 1-3.
path = os.getcwd()
patch_data = xlrd.open_workbook(path + '/src/patch2021ti10.xls')
patch_table = patch_data.sheets()[0]
row = patch_table.nrows
for i in range(1, row):
    if patch_table.cell_value(i, 0) == '队名':
        club_list.append(Club(patch_table.cell_value(i, 1)))
        print(patch_table.cell_value(i, 1) + " 战队加载中……")
        # Add the five players from the rows following the marker row
        # (replaces five hand-numbered copies of the same call).
        for offset in range(1, 6):
            club_list[-1].add_player(
                patch_table.cell_value(i + offset, 1),
                patch_table.cell_value(i + offset, 2),
                patch_table.cell_value(i + offset, 3),
            )
| [
"506300081@qq.com"
] | 506300081@qq.com |
7fa882dc540662fffa8f714c6124767e6bb8b1a6 | 7118862c20c0b503f9e901026e48a809e29f5cf5 | /ar_markers/coding.py | 87df7bd2c25243aa1dfe07fe9b784377cd8a6788 | [
"BSD-3-Clause"
] | permissive | pstraczynski/ar_markers | 964c0405dd7b51ac12f6f4c042626514667f7324 | 408737244ef7a655607858a6852189d5aef02e9b | refs/heads/master | 2022-11-17T22:08:42.885805 | 2020-07-16T11:46:38 | 2020-07-16T11:46:38 | 280,138,112 | 0 | 0 | BSD-3-Clause | 2020-07-16T11:38:53 | 2020-07-16T11:38:52 | null | UTF-8 | Python | false | false | 2,924 | py | # this is all hamming code stuff, no user stuff here ... move along, move along
from numpy import matrix, array
# Hamming(7,4) generator matrix: multiplying it by a 4-bit data vector
# [d1, d2, d3, d4] yields the 7-bit codeword [p1, p2, d1, p3, d2, d3, d4]
# (rows 3, 5, 6 and 7 pass the data bits through unchanged; rows 1, 2 and 4
# compute the parity bits).
GENERATOR_MATRIX = matrix([
    [1, 1, 0, 1],
    [1, 0, 1, 1],
    [1, 0, 0, 0],
    [0, 1, 1, 1],
    [0, 1, 0, 0],
    [0, 0, 1, 0],
    [0, 0, 0, 1],
])
# Extracts the 4 data bits (codeword positions 3, 5, 6 and 7) back out of a
# corrected 7-bit codeword.
REGENERATOR_MATRIX = matrix([
    [0, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
    [0, 0, 0, 0, 0, 1, 0],
    [0, 0, 0, 0, 0, 0, 1],
])
# Hamming(7,4) parity-check matrix H. Row k checks the codeword positions
# whose 1-based index has bit k set (row 1: 1,3,5,7; row 2: 2,3,6,7;
# row 3: 4,5,6,7).
PARITY_CHECK_MATRIX = matrix([
    [1, 0, 1, 0, 1, 0, 1],
    [0, 1, 1, 0, 0, 1, 1],
    [0, 0, 0, 1, 1, 1, 1],
])

# (row, column) cells of the marker grid that carry the 21 Hamming-encoded
# bits (3 codewords of 7 bits each).
HAMMINGCODE_MARKER_POSITIONS = [
    [1, 2], [1, 3], [1, 4],
    [2, 1], [2, 2], [2, 3], [2, 4], [2, 5],
    [3, 1], [3, 2], [3, 3], [3, 4], [3, 5],
    [4, 1], [4, 2], [4, 3], [4, 4], [4, 5],
    [5, 2], [5, 3], [5, 4],
]


def encode(bits):
    """Hamming(7,4)-encode a string of '0'/'1' characters.

    Every 4 data bits become one 7-bit codeword, so the result is
    len(bits) * 7 / 4 characters long.

    Raises:
        ValueError: if len(bits) is not a multiple of 4.
    """
    encoded_code = ''
    if len(bits) % 4 != 0:
        raise ValueError('Only a multiple of 4 as bits are allowed.')
    while len(bits) >= 4:
        four_bits = bits[:4]
        bit_array = generate_bit_array(four_bits)
        hamming_code = matrix_array_multiply_and_format(GENERATOR_MATRIX, bit_array)
        encoded_code += ''.join(hamming_code)
        bits = bits[4:]
    return encoded_code


def decode(bits):
    """Decode a string of Hamming(7,4) codewords back into the data bits.

    Each 7-bit codeword is single-error corrected via parity_correct()
    before its 4 data bits are extracted.

    Raises:
        ValueError: if len(bits) is not a multiple of 7, or if it contains
            characters other than '0' and '1'.
    """
    decoded_code = ''
    if len(bits) % 7 != 0:
        raise ValueError('Only a multiple of 7 as bits are allowed.')
    for bit in bits:
        if int(bit) not in [0, 1]:
            raise ValueError('The provided bits contain other values that 0 or 1: %s' % bits)
    while len(bits) >= 7:
        seven_bits = bits[:7]
        uncorrected_bit_array = generate_bit_array(seven_bits)
        corrected_bit_array = parity_correct(uncorrected_bit_array)
        decoded_bits = matrix_array_multiply_and_format(REGENERATOR_MATRIX, corrected_bit_array)
        decoded_code += ''.join(decoded_bits)
        bits = bits[7:]
    return decoded_code


def parity_correct(bit_array):
    """Correct (in place) a single-bit error in a 7-bit Hamming codeword.

    The syndrome s = H . r is computed with PARITY_CHECK_MATRIX. For a
    single flipped bit the 1-based error position is s1 + 2*s2 + 4*s3,
    i.e. the syndrome read with s1 as the LEAST significant bit.
    Returns the (possibly corrected) array.
    """
    checked_parity = matrix_array_multiply_and_format(PARITY_CHECK_MATRIX, bit_array)
    # An all-zero syndrome means every parity equation holds: nothing to fix.
    if any(bit != '0' for bit in checked_parity):
        # BUGFIX: the syndrome bits used to be joined in row order
        # (s1 s2 s3) and read with s1 as the MOST significant bit, which
        # swapped the corrections for error positions 1<->4 and 3<->6 and
        # so corrupted a second data bit whenever d1 or d3 was hit.
        # s1 is the least significant bit of the error position.
        error_bit = int(''.join(reversed(checked_parity)), 2)
        index = error_bit - 1
        bit_array[index] = 1 - bit_array[index]
    return bit_array


def matrix_array_multiply_and_format(matrix, array):
    """Multiply *matrix* by *array* over GF(2); return a list of '0'/'1' strings."""
    unformated = matrix.dot(array).tolist()[0]
    return [str(bit % 2) for bit in unformated]


def generate_bit_array(bits):
    """Turn a '0'/'1' string into a numpy integer array."""
    return array([int(bit) for bit in bits])
def extract_hamming_code(mat):
    """Read the 21 Hamming-code bits out of a marker matrix as a '0'/'1' string."""
    return ''.join(str(int(mat[row, col])) for row, col in HAMMINGCODE_MARKER_POSITIONS)
| [
"walchko@users.noreply.github.com"
] | walchko@users.noreply.github.com |
5dea86eaad29c2989353630f5ff2bd269066b61c | 275252208c840919755e06029a298ae7f3344f39 | /option_system/test/test_case/scene_sta.py | e87a0df54c575951ebc19119b4386f4548ce4230 | [] | no_license | wanying0106/Test_system | a317c4d909ccf935f7d9c080fc5eb283a6d3ac82 | d28b1a02f7ad7477ce83644fac98d8fb5952f4e1 | refs/heads/master | 2020-03-30T04:02:42.047288 | 2019-06-29T06:33:05 | 2019-06-29T06:33:05 | 150,721,764 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,239 | py | # coding: utf-8
from time import sleep
import unittest, sys
sys.path.append("./ models")
sys.path.append("./ page_obj")
from models.myunit import *
from models.function import *
from page_obj.loginPage import login
from page_obj.planPage import plan
from page_obj.switchPage import switch
from page_obj.employeesPage import employees
class grantProcessTest(MyTest):
    """End-to-end Selenium test for the option/RSU grant workflow."""

    def test_grantProcess(self, username = "dev@test.com", password = "abcd1234"):
        """Log in with a known account and grant options/RSUs on the plan page."""
        # Log in to the system
        login(self.driver).user_login(username, password)
        po = login(self.driver)
        # insert_img(self.driver, "success_login1.png")
        sleep(2)
        # "您好,Kate" is the post-login greeting expected for this account.
        self.assertEqual(po.login_success(), "您好,Kate")
        '''
        #切换至员工管理页面
        switch(self.driver).switchTo_staffManage()
        #添加员工
        employees(self.driver).addEmployees(name = "测试1", state = "on", email = "123@tesr.com")
        po = employees(self.driver)
        sleep(2)
        # self.assertEqual(po.addEmployees_success_hint(), "测试1")
        '''
        # Switch to the plan management page
        switch(self.driver).switchTo_planManage()
        sleep(2)
        # Grant options/RSUs to the (previously created) employee "测试"
        plan(self.driver).user_grant(member = "测试", matureType = "time", number = "1000")
        sleep(2)
if __name__ == '__main__':
    # Allow running this test module directly via unittest's CLI runner.
    unittest.main()
| [
"15175089260@163.com"
] | 15175089260@163.com |
2297e424c5d15295bfb270d9c8020915f0f434ed | 5854b5b0541075a9fe9b93dcdb4611746bcd3aa6 | /investigate.py | 777e13998a49d44057ceb5b5394d689c395517c1 | [] | no_license | KeithWM/conditioned | 479ea66b70e1fe93d72f9c05bf14cf4b0a80920d | aff54fba8dfa6af0290bd8a4fe16afe92abeb41e | refs/heads/master | 2021-01-21T07:07:15.091972 | 2017-03-01T08:46:17 | 2017-03-01T08:46:17 | 83,318,423 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,369 | py | import pandas
from matplotlib import pyplot as plt
# Exploratory plotting script: loads one or more simulation result CSVs and
# scatters 'mean_var_R' against each parameter column.
dfs = ()  # leftover accumulator from an earlier multi-file variant (unused)
names = ()
names+= ('taylor',)
# names+= ('int',)
for name in names:
    # dfs+=(pandas.read_csv('df_{}.csv'.format(name), index_col=0),)
    # NOTE(review): df is overwritten each iteration, so only the last name
    # in `names` is actually plotted below — intentional while names has one
    # entry, but worth confirming if more are re-enabled.
    df = pandas.read_csv('df_{}.csv'.format(name), index_col=0)
# df = pandas.concat(dfs, axis=0)
# Parameter columns expected in the CSV (presumably simulation settings).
int_keys = ('N',)
float_keys = ('beta', 'gamma', 'upsilon', 'T')
keys = int_keys + float_keys
# keys = keys[:3]
# K = len(keys)
# if K%2 == 0:
#     fig, axs = plt.subplots(K/2, K-1)
# else:
#     fig, axs = plt.subplots((K-1)/2, K)
# axs = axs.flatten()
#
# df['mean_var_R_disc'] = df['mean_var_R'] - 1.25
#
# k=0
# for i, key1 in enumerate(keys[:-1]):
# for key2 in keys[i+1:]:
# df.plot.scatter(x=key1, y=key2, c='mean_var_R_disc', s=100, logx=True, logy=True, ax=axs[k], colormap='BrBG')
# k+= 1
keys = keys + ('tau',)
K = len(keys)
# One scatter subplot per key, all sharing the y-axis quantity mean_var_R.
fig, axs = plt.subplots(1, K)
axs = axs.flatten()
# Absolute deviation of mean_var_R from 1.25 (presumably the target value
# of the experiment — TODO confirm against the simulation code).
df['mean_var_R_disc'] = abs(df['mean_var_R'] - 1.25)
df['tau'] = df['T']/df['N']  # derived time-step-like quantity T/N
k=0
for i, key in enumerate(keys):
    df.plot.scatter(x=key, y='mean_var_R', s=100, logx=True, logy=False, ax=axs[k])
    k+= 1
# Separate figure: tau vs upsilon colored by mean_var_R.
fig, ax = plt.subplots(1, 1)
df.plot.scatter(x='tau', y='upsilon', c='mean_var_R', s=100, logx=True, logy=True, ax=ax)
plt.show()
"keith@myerscough.nl"
] | keith@myerscough.nl |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.