[dataset export of repo 2301_82000044/group-buy-market-z; columns: code | repo_name | path | language | license | size]
package zack.project.infrastructure.dao.po;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
import java.util.Date;
/**
 * @author zack
 * @description Crowd tag
 * @create 2024-12-28 11:42
 */
@Data
@Builder
@AllArgsConstructor
@NoArgsConstructor
public class CrowdTags {
/** Auto-increment ID */
private Long id;
/** Crowd tag ID */
private String tagId;
/** Crowd tag name */
private String tagName;
/** Crowd tag description */
private String tagDesc;
/** Crowd tag statistics count */
private Integer statistics;
/** Create time */
private Date createTime;
/** Update time */
private Date updateTime;
}
// source: group-buy-market-z-infrastructure/src/main/java/zack/project/infrastructure/dao/po/CrowdTags.java
package zack.project.infrastructure.dao.po;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
import java.util.Date;
/**
 * @author zack
 * @description Crowd tag detail
 * @create 2024-12-28 11:43
 */
@Data
@Builder
@AllArgsConstructor
@NoArgsConstructor
public class CrowdTagsDetail {
/** Auto-increment ID */
private Long id;
/** Crowd tag ID */
private String tagId;
/** User ID */
private String userId;
/** Create time */
private Date createTime;
/** Update time */
private Date updateTime;
}
// source: group-buy-market-z-infrastructure/src/main/java/zack/project/infrastructure/dao/po/CrowdTagsDetail.java
package zack.project.infrastructure.dao.po;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
import java.util.Date;
/**
 * @author zack
 * @description Crowd tag job
 * @create 2024-12-28 11:45
 */
@Data
@Builder
@AllArgsConstructor
@NoArgsConstructor
public class CrowdTagsJob {
/** Auto-increment ID */
private Long id;
/** Tag ID */
private String tagId;
/** Batch ID */
private String batchId;
/** Tag type (participation count, spend amount) */
private Integer tagType;
/** Tag rule (limit type, N times) */
private String tagRule;
/** Statistics window start time */
private Date statStartTime;
/** Statistics window end time */
private Date statEndTime;
/** Status: 0 initial, 1 planned (entering execution), 2 reset, 3 complete */
private Integer status;
/** Create time */
private Date createTime;
/** Update time */
private Date updateTime;
}
// source: group-buy-market-z-infrastructure/src/main/java/zack/project/infrastructure/dao/po/CrowdTagsJob.java
package zack.project.infrastructure.dao.po;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
import java.util.Date;
/**
 * @author zack
 * @description Group-buy activity
 * @create 2024-12-07 10:01
 */
@Data
@Builder
@AllArgsConstructor
@NoArgsConstructor
public class GroupBuyActivity {
/** Auto-increment ID */
private Long id;
/** Activity ID */
private Long activityId;
/** Activity name */
private String activityName;
/** Discount ID */
private String discountId;
/** Group-buy type (0 auto team completion, 1 complete when target reached) */
private Integer groupType;
/** Limit on group-buy participation count */
private Integer takeLimitCount;
/** Group-buy target (team size) */
private Integer target;
/** Group-buy duration (minutes) */
private Integer validTime;
/** Activity status (0 created, 1 active, 2 expired, 3 discarded) */
private Integer status;
/** Activity start time */
private Date startTime;
/** Activity end time */
private Date endTime;
/** Crowd tag rule identifier */
private String tagId;
/** Crowd tag rule scope */
private String tagScope;
/** Create time */
private Date createTime;
/** Update time */
private Date updateTime;
public static String cacheRedisKey(Long activityId) {
return "group_buy_market_zack.project.infrastructure.dao.po.GroupBuyActivity_" + activityId;
}
}
// source: group-buy-market-z-infrastructure/src/main/java/zack/project/infrastructure/dao/po/GroupBuyActivity.java
package zack.project.infrastructure.dao.po;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
import java.util.Date;
/**
 * @author zack
 * @description Discount configuration
 * @create 2024-12-07 10:06
 */
@Data
@Builder
@AllArgsConstructor
@NoArgsConstructor
public class GroupBuyDiscount {
/**
 * Auto-increment ID
 */
private Long id;
/**
 * Discount ID
 */
private String discountId;
/**
 * Discount title
 */
private String discountName;
/**
 * Discount description
 */
private String discountDesc;
/**
 * Discount type (0: base, 1: tag)
 */
private Integer discountType;
/**
 * Marketing discount plan (ZJ: direct price cut, MJ: spend-threshold discount, N-yuan purchase)
 */
private String marketPlan;
/**
 * Marketing discount expression
 */
private String marketExpr;
/**
 * Crowd tag restricting the discount to specific users
 */
private String tagId;
/**
 * Create time
 */
private Date createTime;
/**
 * Update time
 */
private Date updateTime;
public static String cacheRedisKey(String discountId) {
return "group_buy_market_zack.project.infrastructure.dao.po.GroupBuyDiscount_" + discountId;
}
}
// source: group-buy-market-z-infrastructure/src/main/java/zack/project/infrastructure/dao/po/GroupBuyDiscount.java
package zack.project.infrastructure.dao.po;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
import java.math.BigDecimal;
import java.util.Date;
/**
 * @author zack
 * @description User group-buy order (team record)
 * @create 2025-01-11 10:29
 */
@Data
@Builder
@AllArgsConstructor
@NoArgsConstructor
public class GroupBuyOrder {
/** Auto-increment ID */
private Long id;
/** Group-buy team ID */
private String teamId;
/** Activity ID */
private Long activityId;
/** Source */
private String source;
/** Channel */
private String channel;
/** Original price */
private BigDecimal originalPrice;
/** Deduction amount */
private BigDecimal deductionPrice;
/** Pay price */
private BigDecimal payPrice;
/** Target count */
private Integer targetCount;
/** Complete count */
private Integer completeCount;
/** Lock count */
private Integer lockCount;
/** Status (0 in progress, 1 complete, 2 failed) */
private Integer status;
/** Group-buy valid start time, i.e. when the team was joined */
private Date validStartTime;
/** Group-buy valid end time, i.e. start time plus the validity duration */
private Date validEndTime;
/** Notify type: HTTP, MQ */
private String notifyType;
/** Notify callback (for HTTP callbacks the URL must not be empty) */
private String notifyUrl;
/** Create time */
private Date createTime;
/** Update time */
private Date updateTime;
}
// source: group-buy-market-z-infrastructure/src/main/java/zack/project/infrastructure/dao/po/GroupBuyOrder.java
package zack.project.infrastructure.dao.po;
import zack.project.infrastructure.dao.po.base.Page;
import lombok.*;
import java.math.BigDecimal;
import java.util.Date;
/**
 * @author zack
 * @description User group-buy order detail
 * @create 2025-01-11 08:42
 */
@EqualsAndHashCode(callSuper = true)
@Data
@Builder
@AllArgsConstructor
@NoArgsConstructor
public class GroupBuyOrderList extends Page {
/** Auto-increment ID */
private Long id;
/** User ID */
private String userId;
/** Group-buy team ID */
private String teamId;
/** Order ID */
private String orderId;
/** Activity ID */
private Long activityId;
/** Activity start time */
private Date startTime;
/** Activity end time */
private Date endTime;
/** Goods ID */
private String goodsId;
/** Source */
private String source;
/** Channel */
private String channel;
/** Original price */
private BigDecimal originalPrice;
/** Deduction amount */
private BigDecimal deductionPrice;
/** Pay price */
private BigDecimal payPrice;
/** Status: 0 initial lock, 1 settlement complete */
private Integer status;
/** External trade number, guarantees idempotency of external calls */
private String outTradeNo;
/** External trade time */
private Date outTradeTime;
/** Unique business ID */
private String bizId;
/** Create time */
private Date createTime;
/** Update time */
private Date updateTime;
}
// source: group-buy-market-z-infrastructure/src/main/java/zack/project/infrastructure/dao/po/GroupBuyOrderList.java
package zack.project.infrastructure.dao.po;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
import java.util.Date;
/**
 * @description Notify callback task
 * @create 2025-01-26 18:19
 */
@Data
@Builder
@AllArgsConstructor
@NoArgsConstructor
public class NotifyTask {
/** Auto-increment ID */
private Long id;
/** Activity ID */
private Long activityId;
/** Group-buy team ID */
private String teamId;
/** Notify category (business-specific) */
private String notifyCategory;
/** Notify type (MQ/HTTP) */
private String notifyType;
/** Notify MQ message */
private String notifyMQ;
/** Notify callback URL */
private String notifyUrl;
/** RPC notify interface */
private String notifyRpc;
/** Notify attempt count */
private Integer notifyCount;
/** Notify status (0 initial, 1 complete, 2 retry, 3 failed) */
private Integer notifyStatus;
/** Parameter object (JSON) */
private String parameterJson;
/** Unique identifier */
private String uuid;
/** Create time */
private Date createTime;
/** Update time */
private Date updateTime;
}
// source: group-buy-market-z-infrastructure/src/main/java/zack/project/infrastructure/dao/po/NotifyTask.java
package zack.project.infrastructure.dao.po;
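/**
 * Pagination base PO exposing a single count field (see also dao.po.base.Page).
 */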
public class Page {
int count;
public int getCount() {
return count;
}
public void setCount(int count) {
this.count = count;
}
}
// source: group-buy-market-z-infrastructure/src/main/java/zack/project/infrastructure/dao/po/Page.java
package zack.project.infrastructure.dao.po;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
import java.util.Date;
/**
 * @author zack
 * @description Source-channel goods and activity configuration mapping table
 * @create 2025-01-01 09:27
 */
@Data
@Builder
@AllArgsConstructor
@NoArgsConstructor
public class SCSkuActivity {
/** Auto-increment ID */
private Long id;
/** Source */
private String source;
/** Channel */
private String channel;
/** Activity ID */
private Long activityId;
/** Goods ID */
private String goodsId;
/** Create time */
private Date createTime;
/** Update time */
private Date updateTime;
}
// source: group-buy-market-z-infrastructure/src/main/java/zack/project/infrastructure/dao/po/SCSkuActivity.java
package zack.project.infrastructure.dao.po;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
import java.math.BigDecimal;
import java.util.Date;
/**
 * @author zack
 * @description Goods information
 * @create 2024-12-21 10:45
 */
@Data
@Builder
@AllArgsConstructor
@NoArgsConstructor
public class Sku {
/** Auto-increment ID */
private Long id;
/** Source */
private String source;
/** Channel */
private String channel;
/** Goods ID */
private String goodsId;
/** Goods name */
private String goodsName;
/** Original price */
private BigDecimal originalPrice;
/** Create time */
private Date createTime;
/** Update time */
private Date updateTime;
}
// source: group-buy-market-z-infrastructure/src/main/java/zack/project/infrastructure/dao/po/Sku.java
package zack.project.infrastructure.dao.po.base;
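/**
 * Pagination base class; paged query POs such as GroupBuyOrderList extend it to carry a row count.
 */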
public class Page {
private Integer count;
public Integer getCount() {
return count;
}
public void setCount(Integer count) {
this.count = count;
}
}
// source: group-buy-market-z-infrastructure/src/main/java/zack/project/infrastructure/dao/po/base/Page.java
package zack.project.infrastructure.dcc;
import zack.project.types.common.Constants;
import cn.bugstack.wrench.dynamic.config.center.types.annotations.DCCValue;
import org.springframework.stereotype.Service;
import java.util.Arrays;
import java.util.List;
/**
 * @description Dynamic configuration service
 * @create 2025-01-03 15:38
 */
@Service
public class DCCService {
/**
 * Downgrade switch: 0 off, 1 on
 */
@DCCValue("downgradeSwitch:0")
private String downgradeSwitch;
@DCCValue("cutRange:100")
private String cutRange;
@DCCValue("scBlacklist:s02c02")
private String scBlacklist;
@DCCValue("cacheSwitch:0")
private String cacheOpenSwitch;
public boolean isDowngradeSwitch() {
return "1".equals(downgradeSwitch);
}
public boolean isCutRange(String userId) {
// Absolute value of the user's hash code
int hashCode = Math.abs(userId.hashCode());
// Keep the last two digits
int lastTwoDigits = hashCode % 100;
// Pass when the bucket falls within the configured cut range
return lastTwoDigits <= Integer.parseInt(cutRange);
}
/**
 * Blacklist interception by source/channel: true intercept, false pass
 */
public boolean isSCBlackIntercept(String source, String channel) {
List<String> list = Arrays.asList(scBlacklist.split(Constants.SPLIT));
return list.contains(source + channel);
}
/**
 * Cache switch: returns true when the cache is enabled ("0"), false when disabled ("1")
 */
public boolean isCacheOpenSwitch(){
return "0".equals(cacheOpenSwitch);
}
}
// source: group-buy-market-z-infrastructure/src/main/java/zack/project/infrastructure/dcc/DCCService.java
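As a usage illustration (not part of the repo), the switches above would typically gate a request path in this order; the wrapper class below is hypothetical, and only DCCService and its methods come from the file above.
package zack.project.infrastructure.dcc;
import org.springframework.stereotype.Service;
import javax.annotation.Resource;
// Hypothetical sketch: consult the DCC switches before serving a request.
@Service
public class DccGateSketch {
@Resource
private DCCService dccService;
public boolean allow(String userId, String source, String channel) {
// Reject everything while the downgrade switch is on.
if (dccService.isDowngradeSwitch()) return false;
// Reject blacklisted source/channel combinations.
if (dccService.isSCBlackIntercept(source, channel)) return false;
// Only users hashed into the cut range (100 = everyone) pass.
return dccService.isCutRange(userId);
}
}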
package zack.project.infrastructure.event;
import lombok.extern.slf4j.Slf4j;
import org.springframework.amqp.core.MessageDeliveryMode;
import org.springframework.amqp.rabbit.core.RabbitTemplate;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;
/**
 * @author zack
 * @description Message publishing
 * @create 2024-03-30 12:40
 */
@Slf4j
@Component
public class EventPublisher {
@Autowired
private RabbitTemplate rabbitTemplate;
@Value("${spring.rabbitmq.config.producer.exchange}")
private String exchangeName;
public void publish(String routingKey, String message) {
try {
rabbitTemplate.convertAndSend(exchangeName, routingKey, message, m -> {
// Mark the message as persistent
m.getMessageProperties().setDeliveryMode(MessageDeliveryMode.PERSISTENT);
return m;
});
} catch (Exception e) {
log.error("Failed to send MQ message team_success message:{}", message, e);
throw e;
}
}
}
// source: group-buy-market-z-infrastructure/src/main/java/zack/project/infrastructure/event/EventPublisher.java
package zack.project.infrastructure.gateway;
import zack.project.types.enums.ResponseCode;
import zack.project.types.exception.AppException;
import lombok.extern.slf4j.Slf4j;
import okhttp3.*;
import org.springframework.stereotype.Service;
import javax.annotation.Resource;
/**
 * @description Group-buy callback service
 * @create 2025-01-31 09:12
 */
@Slf4j
@Service
public class GroupBuyNotifyService {
@Resource
private OkHttpClient okHttpClient;
public String groupBuyNotify(String apiUrl, String notifyRequestDTOJSON) throws Exception {
try {
// 1. Build the request
MediaType mediaType = MediaType.parse("application/json");
RequestBody body = RequestBody.create(mediaType, notifyRequestDTOJSON);
Request request = new Request.Builder()
.url(apiUrl)
.post(body)
.addHeader("content-type", "application/json")
.build();
// 2. Execute the call
Response response = okHttpClient.newCall(request).execute();
// 3. Return the result
return response.body().string();
} catch (Exception e) {
log.error("Group-buy HTTP callback service exception {}", apiUrl, e);
throw new AppException(ResponseCode.HTTP_EXCEPTION);
}
}
}
// source: group-buy-market-z-infrastructure/src/main/java/zack/project/infrastructure/gateway/GroupBuyNotifyService.java
package zack.project.infrastructure.gateway;
import zack.project.trigger.api.ILotteryService;
import zack.project.trigger.api.dto.LotteryBaseRequestDTO;
import zack.project.trigger.api.dto.LotteryResponseDTO;
import zack.project.trigger.api.request.Request;
import zack.project.trigger.api.response.Response;
import zack.project.types.enums.ResponseCode;
import zack.project.types.exception.AppException;
import lombok.extern.slf4j.Slf4j;
import org.apache.dubbo.config.annotation.Reference;
import org.springframework.stereotype.Service;
import java.util.HashMap;
import java.util.Map;
/**
* @author A1793
*/
@Service
@Slf4j
public class LotteryRpcService {
@Reference(version = "1.0",
check = false,
mock = "return \" RemoteService:lottery not available right now.\"")
zack.project.trigger.api.ILotteryService lotteryService;
public String lottery(Integer takeCount, Long strategyId) {
try{
LotteryBaseRequestDTO lotteryBaseRequestDTO = new LotteryBaseRequestDTO();
Map<String, Object> extParams = new HashMap<String, Object>();
extParams.put("takeCount", takeCount);
lotteryBaseRequestDTO.setStrategyId(strategyId);
lotteryBaseRequestDTO.setExtParams(extParams);
lotteryBaseRequestDTO.setBizType("lUCKRATE");
Request<LotteryBaseRequestDTO> request = new Request<>("group", "1", lotteryBaseRequestDTO);
Response<LotteryResponseDTO> response = lotteryService.lottery(request);
LotteryResponseDTO data = response.getData();
log.info("营销抽奖接口调用成功 随机概率:{}",data.getAwardResult());
return data.getAwardResult();
}catch (Exception e){
log.error("抽奖接口调用失败 错误信息:{} ", e.getMessage());
throw new AppException(ResponseCode.RPC_EXCEPTION);
}
}
}
// source: group-buy-market-z-infrastructure/src/main/java/zack/project/infrastructure/gateway/LotteryRpcService.java
package zack.project.infrastructure.gateway;
import zack.project.trigger.api.IRebateService;
import zack.project.trigger.api.dto.RebateBehaviorDTO;
import zack.project.trigger.api.request.Request;
import zack.project.trigger.api.response.Response;
import zack.project.types.enums.NotifyTaskHTTPEnumVO;
import zack.project.types.enums.ResponseCode;
import zack.project.types.exception.AppException;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.TypeReference;
import lombok.extern.slf4j.Slf4j;
import org.apache.dubbo.config.annotation.Reference;
import org.springframework.stereotype.Service;
import java.util.HashMap;
import java.util.Map;
/**
* @author A1793
*/
@Service
@Slf4j
public class RebateRpcService {
@Reference(version = "1.0",
check = false,
mock = "return \" RemoteService:rebate not available right now.\"")
IRebateService rebateService;
public String sendRebate(String notifyInterface, String parameterJson) {
try {
Map<String, Object> parameterMap = JSON.parseObject(parameterJson, new TypeReference<HashMap<String, Object>>() {
});
String behaviorType = (String) parameterMap.get("behaviorType");
String userId = (String) parameterMap.get("userId");
String outTradeNo = (String) parameterMap.get("outTradeNo");
Request<RebateBehaviorDTO> request = Request.<RebateBehaviorDTO>builder()
.appId("group" + userId).
appToken("group" + userId)
.data(RebateBehaviorDTO.builder()
.userId(userId)
.outTradeNo(outTradeNo)
.behaviorType(behaviorType)
.build()
).build();
Response response = rebateService.sendRebate(request);
if (response.getData().equals("false")) {
return NotifyTaskHTTPEnumVO.ERROR.getCode();
}
return NotifyTaskHTTPEnumVO.SUCCESS.getCode();
} catch (Exception e) {
log.error("拼团回调 RPC 接口服务异常{}", notifyInterface, e);
throw new AppException(ResponseCode.RPC_EXCEPTION);
}
}
}
// source: group-buy-market-z-infrastructure/src/main/java/zack/project/infrastructure/gateway/RebateRpcService.java
package zack.project.infrastructure.gateway.dto;
// source: group-buy-market-z-infrastructure/src/main/java/zack/project/infrastructure/gateway/dto/package-info.java
/**
 * Defines HTTP and RPC interfaces for calling external services; invoked from the adapter layer.
 */
package zack.project.infrastructure.gateway;
// source: group-buy-market-z-infrastructure/src/main/java/zack/project/infrastructure/gateway/package-info.java
package zack.project.infrastructure.redis;
import org.redisson.api.*;
import java.math.BigInteger;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.concurrent.TimeUnit;
/**
 * Redis service
 *
 * @author zack
 */
public interface IRedisService {
/**
 * Set the value of the given key
 *
 * @param key key
 * @param value value
 */
<T> void setValue(String key, T value);
/**
 * Set the value of the given key with an expiration
 *
 * @param key key
 * @param value value
 * @param expired expiration time
 */
<T> void setValue(String key, T value, long expired);
/**
 * Get the value of the given key
 *
 * @param key key
 * @return value
 */
<T> T getValue(String key);
/**
 * Get a queue
 *
 * @param key key
 * @param <T> element type
 * @return queue
 */
<T> RQueue<T> getQueue(String key);
/**
 * Get a blocking queue
 *
 * @param key key
 * @param <T> element type
 * @return queue
 */
<T> RBlockingQueue<T> getBlockingQueue(String key);
/**
 * Get a delayed queue backed by the given blocking queue
 *
 * @param rBlockingQueue blocking queue
 * @param <T> element type
 * @return queue
 */
<T> RDelayedQueue<T> getDelayedQueue(RBlockingQueue<T> rBlockingQueue);
/**
 * Set an atomic long value
 *
 * @param key key
 * @param value value
 */
void setAtomicLong(String key, long value);
/**
 * Get an atomic long value
 *
 * @param key key
 */
Long getAtomicLong(String key);
/**
 * Increment the key's value by one; 1, 2, 3, 4 ...
 *
 * @param key key
 * @return value after increment
 */
long incr(String key);
/**
 * Increment the key's value by the given delta
 *
 * @param key key
 * @param delta increment
 * @return value after increment
 */
long incrBy(String key, long delta);
/**
 * Decrement the key's value by one
 *
 * @param key key
 * @return value after decrement
 */
long decr(String key);
/**
 * Decrement the key's value by the given delta
 *
 * @param key key
 * @param delta decrement
 * @return value after decrement
 */
long decrBy(String key, long delta);
/**
 * Remove the given key
 *
 * @param key key
 */
void remove(String key);
/**
 * Check whether the given key exists
 *
 * @param key key
 * @return true/false
 */
boolean isExists(String key);
/**
 * Add the given value to a set
 *
 * @param key key
 * @param value value
 */
void addToSet(String key, String value);
/**
 * Check whether the given value is a member of the set
 *
 * @param key key
 * @param value value
 * @return true if it is a member of the set, false otherwise
 */
boolean isSetMember(String key, String value);
/**
 * Add the given value to a list
 *
 * @param key key
 * @param value value
 */
void addToList(String key, String value);
/**
 * Get the value at the given index of a list
 *
 * @param key key
 * @param index index
 * @return value
 */
String getFromList(String key, int index);
/**
 * Get a map
 *
 * @param key key
 * @return map
 */
<K, V> RMap<K, V> getMap(String key);
/**
 * Put the given field/value pair into a hash
 *
 * @param key key
 * @param field field
 * @param value value
 */
void addToMap(String key, String field, String value);
/**
 * Get the value of the given field in a hash
 *
 * @param key key
 * @param field field
 * @return value
 */
String getFromMap(String key, String field);
/**
 * Get the value of the given field in a hash
 *
 * @param key key
 * @param field field
 * @return value
 */
<K, V> V getFromMap(String key, K field);
/**
 * Add the given value to a sorted set
 *
 * @param key key
 * @param value value
 */
void addToSortedSet(String key, String value);
/**
 * Get a Redis lock (reentrant lock)
 *
 * @param key key
 * @return Lock
 */
RLock getLock(String key);
/**
 * Get a Redis lock (fair lock)
 *
 * @param key key
 * @return Lock
 */
RLock getFairLock(String key);
/**
 * Get a Redis lock (read/write lock)
 *
 * @param key key
 * @return RReadWriteLock
 */
RReadWriteLock getReadWriteLock(String key);
/**
 * Get a Redis semaphore
 *
 * @param key key
 * @return RSemaphore
 */
RSemaphore getSemaphore(String key);
/**
 * Get a Redis expirable semaphore
 * <p>
 * Redisson's distributed RPermitExpirableSemaphore follows the interface and usage of
 * java.util.concurrent.Semaphore, and additionally provides Async, Reactive and RxJava2 variants.
 *
 * @param key key
 * @return RPermitExpirableSemaphore
 */
RPermitExpirableSemaphore getPermitExpirableSemaphore(String key);
/**
 * Count-down latch
 *
 * @param key key
 * @return RCountDownLatch
 */
RCountDownLatch getCountDownLatch(String key);
/**
 * Bloom filter
 *
 * @param key key
 * @param <T> element type
 * @return bloom filter
 */
<T> RBloomFilter<T> getBloomFilter(String key);
Boolean setNx(String key);
Boolean setNx(String key, long expired, TimeUnit timeUnit);
RBitSet getBitSet(String key);
default int getIndexFromUserId(String userId) {
try {
MessageDigest md = MessageDigest.getInstance("MD5");
byte[] hashBytes = md.digest(userId.getBytes(StandardCharsets.UTF_8));
// Convert the hash bytes into a positive integer
BigInteger bigInt = new BigInteger(1, hashBytes);
// Take the modulus to keep the index within integer range
return bigInt.mod(BigInteger.valueOf(Integer.MAX_VALUE)).intValue();
} catch (NoSuchAlgorithmException e) {
throw new RuntimeException("MD5 algorithm not found", e);
}
}
}
// source: group-buy-market-z-infrastructure/src/main/java/zack/project/infrastructure/redis/IRedisService.java
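For illustration (not from the repo), the MD5 index helper pairs naturally with the bitset accessor above; the class and key prefix below are invented for the sketch.
package zack.project.infrastructure.redis;
import org.redisson.api.RBitSet;
import javax.annotation.Resource;
// Hypothetical sketch: mark a user in a crowd-tag bitmap via the MD5 index helper.
public class CrowdBitmapSketch {
@Resource(name = "redissonService")
private IRedisService redisService;
public void markUser(String tagId, String userId) {
RBitSet bitSet = redisService.getBitSet("crowd_tag_" + tagId);
// getIndexFromUserId maps the user ID onto a stable non-negative bit index.
bitSet.set(redisService.getIndexFromUserId(userId));
}
}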
package zack.project.infrastructure.redis;
import org.redisson.api.*;
import org.springframework.stereotype.Service;
import javax.annotation.Resource;
import java.time.Duration;
import java.util.concurrent.TimeUnit;
/**
 * Redis service - Redisson implementation
 *
 * @author zack
 */
@Service("redissonService")
public class RedissonService implements IRedisService {
@Resource
private RedissonClient redissonClient;
public <T> void setValue(String key, T value) {
redissonClient.<T>getBucket(key).set(value);
}
@Override
public <T> void setValue(String key, T value, long expired) {
RBucket<T> bucket = redissonClient.getBucket(key);
bucket.set(value, Duration.ofMillis(expired));
}
public <T> T getValue(String key) {
return redissonClient.<T>getBucket(key).get();
}
@Override
public <T> RQueue<T> getQueue(String key) {
return redissonClient.getQueue(key);
}
@Override
public <T> RBlockingQueue<T> getBlockingQueue(String key) {
return redissonClient.getBlockingQueue(key);
}
@Override
public <T> RDelayedQueue<T> getDelayedQueue(RBlockingQueue<T> rBlockingQueue) {
return redissonClient.getDelayedQueue(rBlockingQueue);
}
@Override
public void setAtomicLong(String key, long value) {
redissonClient.getAtomicLong(key).set(value);
}
@Override
public Long getAtomicLong(String key) {
return redissonClient.getAtomicLong(key).get();
}
@Override
public long incr(String key) {
return redissonClient.getAtomicLong(key).incrementAndGet();
}
@Override
public long incrBy(String key, long delta) {
return redissonClient.getAtomicLong(key).addAndGet(delta);
}
@Override
public long decr(String key) {
return redissonClient.getAtomicLong(key).decrementAndGet();
}
@Override
public long decrBy(String key, long delta) {
return redissonClient.getAtomicLong(key).addAndGet(-delta);
}
@Override
public void remove(String key) {
redissonClient.getBucket(key).delete();
}
@Override
public boolean isExists(String key) {
return redissonClient.getBucket(key).isExists();
}
public void addToSet(String key, String value) {
RSet<String> set = redissonClient.getSet(key);
set.add(value);
}
public boolean isSetMember(String key, String value) {
RSet<String> set = redissonClient.getSet(key);
return set.contains(value);
}
public void addToList(String key, String value) {
RList<String> list = redissonClient.getList(key);
list.add(value);
}
public String getFromList(String key, int index) {
RList<String> list = redissonClient.getList(key);
return list.get(index);
}
@Override
public <K, V> RMap<K, V> getMap(String key) {
return redissonClient.getMap(key);
}
public void addToMap(String key, String field, String value) {
RMap<String, String> map = redissonClient.getMap(key);
map.put(field, value);
}
public String getFromMap(String key, String field) {
RMap<String, String> map = redissonClient.getMap(key);
return map.get(field);
}
@Override
public <K, V> V getFromMap(String key, K field) {
return redissonClient.<K, V>getMap(key).get(field);
}
public void addToSortedSet(String key, String value) {
RSortedSet<String> sortedSet = redissonClient.getSortedSet(key);
sortedSet.add(value);
}
@Override
public RLock getLock(String key) {
return redissonClient.getLock(key);
}
@Override
public RLock getFairLock(String key) {
return redissonClient.getFairLock(key);
}
@Override
public RReadWriteLock getReadWriteLock(String key) {
return redissonClient.getReadWriteLock(key);
}
@Override
public RSemaphore getSemaphore(String key) {
return redissonClient.getSemaphore(key);
}
@Override
public RPermitExpirableSemaphore getPermitExpirableSemaphore(String key) {
return redissonClient.getPermitExpirableSemaphore(key);
}
@Override
public RCountDownLatch getCountDownLatch(String key) {
return redissonClient.getCountDownLatch(key);
}
@Override
public <T> RBloomFilter<T> getBloomFilter(String key) {
return redissonClient.getBloomFilter(key);
}
@Override
public Boolean setNx(String key) {
return redissonClient.getBucket(key).trySet("lock");
}
@Override
public Boolean setNx(String key, long expired, TimeUnit timeUnit) {
return redissonClient.getBucket(key).trySet("lock", expired, timeUnit);
}
@Override
public RBitSet getBitSet(String key) {
return redissonClient.getBitSet(key);
}
}
// source: group-buy-market-z-infrastructure/src/main/java/zack/project/infrastructure/redis/RedissonService.java
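As a usage illustration (not from the repo), the setNx variant above lends itself to simple idempotency guards; the guard class and key prefix below are invented for the sketch.
package zack.project.infrastructure.redis;
import org.springframework.stereotype.Service;
import javax.annotation.Resource;
import java.util.concurrent.TimeUnit;
// Hypothetical once-only guard built on IRedisService.setNx.
@Service
public class OnceGuardSketch {
@Resource(name = "redissonService")
private IRedisService redisService;
public boolean tryOnce(String bizId) {
// trySet succeeds only for the first caller; repeats within 1 hour are rejected.
return Boolean.TRUE.equals(redisService.setNx("once_" + bizId, 1, TimeUnit.HOURS));
}
}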
package zack.project.trigger.http;
import cn.bugstack.wrench.dynamic.config.center.domain.model.valobj.AttributeVO;
import zack.project.api.IDCCService;
import zack.project.api.response.Response;
import zack.project.types.enums.ResponseCode;
import lombok.extern.slf4j.Slf4j;
import org.redisson.api.RTopic;
import org.springframework.web.bind.annotation.*;
import javax.annotation.Resource;
/**
 * @description Dynamic configuration management
 * @create 2025-01-03 19:16
 */
@Slf4j
@RestController()
@CrossOrigin("*")
@RequestMapping("/api/v1/gbm/dcc/")
public class DCCController implements IDCCService {
@Resource(name = "dynamicConfigCenterRedisTopic")
private RTopic dccTopic;
@RequestMapping(value = "update_config", method = RequestMethod.GET)
@Override
public Response<Boolean> updateConfig(@RequestParam String key, @RequestParam String value) {
try {
log.info("DCC 动态配置值变更 key:{} value:{}", key, value);
dccTopic.publish(new AttributeVO(key, value));
return Response.<Boolean>builder()
.code(ResponseCode.SUCCESS.getCode())
.info(ResponseCode.SUCCESS.getInfo())
.build();
} catch (Exception e) {
log.error("DCC 动态配置值变更失败 key:{} value:{}", key, value, e);
return Response.<Boolean>builder()
.code(ResponseCode.UN_ERROR.getCode())
.info(ResponseCode.UN_ERROR.getInfo())
.build();
}
}
}
// source: group-buy-market-z-trigger/src/main/java/zack/project/trigger/http/DCCController.java
package zack.project.trigger.http;
import zack.project.api.IMarketIndexService;
import zack.project.api.dto.GoodsMarketRequestDTO;
import zack.project.api.dto.GoodsMarketResponseDTO;
import zack.project.api.response.Response;
import zack.project.domain.activity.model.entity.MarketProductEntity;
import zack.project.domain.activity.model.entity.TrialBalanceEntity;
import zack.project.domain.activity.model.entity.UserGroupBuyOrderDetailEntity;
import zack.project.domain.activity.model.valobj.GroupBuyActivityDiscountVO;
import zack.project.domain.activity.model.valobj.TeamStatisticVO;
import zack.project.domain.activity.service.IIndexGroupBuyMarketService;
import zack.project.types.enums.ResponseCode;
import cn.bugstack.wrench.rate.limiter.types.annotations.RateLimiterAccessInterceptor;
import com.alibaba.fastjson.JSON;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.springframework.web.bind.annotation.*;
import javax.annotation.Resource;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
/**
 * @description Marketing index (home page) service
 * @create 2025-02-02 16:03
 */
@Slf4j
@RestController()
@CrossOrigin("*")
@RequestMapping("/api/v1/gbm/index/")
public class MarketIndexController implements IMarketIndexService {
@Resource
private IIndexGroupBuyMarketService indexGroupBuyMarketService;
@RateLimiterAccessInterceptor(key = "userId", fallbackMethod = "queryGroupBuyMarketConfigFallBack", permitsPerSecond = 1.0d, blacklistCount = 1)
@RequestMapping(value = "query_group_buy_market_config", method = RequestMethod.POST)
@Override
public Response<GoodsMarketResponseDTO> queryGroupBuyMarketConfig(@RequestBody GoodsMarketRequestDTO requestDTO) {
try {
log.info("查询拼团营销配置开始:{} goodsId:{}", requestDTO.getUserId(), requestDTO.getGoodsId());
if (StringUtils.isBlank(requestDTO.getUserId()) || StringUtils.isBlank(requestDTO.getSource()) || StringUtils.isBlank(requestDTO.getChannel()) || StringUtils.isBlank(requestDTO.getGoodsId())) {
return Response.<GoodsMarketResponseDTO>builder()
.code(ResponseCode.ILLEGAL_PARAMETER.getCode())
.info(ResponseCode.ILLEGAL_PARAMETER.getInfo())
.build();
}
// 1. Marketing discount trial calculation
TrialBalanceEntity trialBalanceEntity = indexGroupBuyMarketService.indexMarketTrial(MarketProductEntity.builder()
.userId(requestDTO.getUserId())
.source(requestDTO.getSource())
.channel(requestDTO.getChannel())
.goodsId(requestDTO.getGoodsId())
.build());
GroupBuyActivityDiscountVO groupBuyActivityDiscountVO = trialBalanceEntity.getGroupBuyActivityDiscountVO();
Long activityId = groupBuyActivityDiscountVO.getActivityId();
// 2. Query in-progress group-buy teams
List<UserGroupBuyOrderDetailEntity> userGroupBuyOrderDetailEntities = indexGroupBuyMarketService.queryInProgressUserGroupBuyOrderDetailList(activityId, requestDTO.getUserId(), 1, 2);
// 3. Aggregate team statistics
TeamStatisticVO teamStatisticVO = indexGroupBuyMarketService.queryTeamStatisticByActivityId(activityId);
GoodsMarketResponseDTO.Goods goods = GoodsMarketResponseDTO.Goods.builder()
.goodsId(trialBalanceEntity.getGoodsId())
.originalPrice(trialBalanceEntity.getOriginalPrice())
.deductionPrice(trialBalanceEntity.getDeductionPrice())
.payPrice(trialBalanceEntity.getPayPrice())
.build();
List<GoodsMarketResponseDTO.Team> teams = new ArrayList<>();
if (null != userGroupBuyOrderDetailEntities && !userGroupBuyOrderDetailEntities.isEmpty()) {
for (UserGroupBuyOrderDetailEntity userGroupBuyOrderDetailEntity : userGroupBuyOrderDetailEntities) {
GoodsMarketResponseDTO.Team team = GoodsMarketResponseDTO.Team.builder()
.userId(userGroupBuyOrderDetailEntity.getUserId())
.teamId(userGroupBuyOrderDetailEntity.getTeamId())
.activityId(userGroupBuyOrderDetailEntity.getActivityId())
.targetCount(userGroupBuyOrderDetailEntity.getTargetCount())
.completeCount(userGroupBuyOrderDetailEntity.getCompleteCount())
.lockCount(userGroupBuyOrderDetailEntity.getLockCount())
.validStartTime(userGroupBuyOrderDetailEntity.getValidStartTime())
.validEndTime(userGroupBuyOrderDetailEntity.getValidEndTime())
.validTimeCountdown(GoodsMarketResponseDTO.Team.differenceDateTime2Str(new Date(), userGroupBuyOrderDetailEntity.getValidEndTime()))
.outTradeNo(userGroupBuyOrderDetailEntity.getOutTradeNo())
.build();
teams.add(team);
}
}
GoodsMarketResponseDTO.TeamStatistic teamStatistic = GoodsMarketResponseDTO.TeamStatistic.builder()
.allTeamCount(teamStatisticVO.getAllTeamCount())
.allTeamCompleteCount(teamStatisticVO.getAllTeamCompleteCount())
.allTeamUserCount(teamStatisticVO.getAllTeamUserCount())
.build();
Response<GoodsMarketResponseDTO> response = Response.<GoodsMarketResponseDTO>builder()
.code(ResponseCode.SUCCESS.getCode())
.info(ResponseCode.SUCCESS.getInfo())
.data(GoodsMarketResponseDTO.builder()
.activityId(activityId)
.goods(goods)
.teamList(teams)
.teamStatistic(teamStatistic)
.build())
.build();
log.info("查询拼团营销配置完成:{} goodsId:{} response:{}", requestDTO.getUserId(), requestDTO.getGoodsId(), JSON.toJSONString(response));
return response;
} catch (Exception e) {
log.error("查询拼团营销配置失败:{} goodsId:{}", requestDTO.getUserId(), requestDTO.getGoodsId(), e);
return Response.<GoodsMarketResponseDTO>builder()
.code(ResponseCode.UN_ERROR.getCode())
.info(ResponseCode.UN_ERROR.getInfo())
.build();
}
}
public Response<GoodsMarketResponseDTO> queryGroupBuyMarketConfigFallBack(@RequestBody GoodsMarketRequestDTO requestDTO) {
log.error("查询拼团营销配置限流:{}", requestDTO.getUserId());
return Response.<GoodsMarketResponseDTO>builder()
.code(ResponseCode.RATE_LIMITER.getCode())
.info(ResponseCode.RATE_LIMITER.getInfo())
.build();
}
}
// source: group-buy-market-z-trigger/src/main/java/zack/project/trigger/http/MarketIndexController.java
package zack.project.trigger.http;
import zack.project.api.IMarketTradeService;
import zack.project.api.dto.*;
import zack.project.api.response.Response;
import zack.project.domain.activity.model.entity.MarketProductEntity;
import zack.project.domain.activity.model.entity.TrialBalanceEntity;
import zack.project.domain.activity.model.valobj.GroupBuyActivityDiscountVO;
import zack.project.domain.activity.service.IIndexGroupBuyMarketService;
import zack.project.domain.trade.model.entity.*;
import zack.project.domain.trade.model.valobj.GroupBuyProgressVO;
import zack.project.domain.trade.model.valobj.NotifyConfigVO;
import zack.project.domain.trade.model.valobj.NotifyTypeEnumVO;
import zack.project.domain.trade.model.valobj.TradeOrderStatusEnumVO;
import zack.project.domain.trade.service.ITradeLockOrderService;
import zack.project.domain.trade.service.ITradeRefundOrderService;
import zack.project.domain.trade.service.ITradeSettlementOrderService;
import zack.project.types.enums.ResponseCode;
import zack.project.types.exception.AppException;
import com.alibaba.fastjson.JSON;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.springframework.web.bind.annotation.*;
import javax.annotation.Resource;
import java.util.Objects;
/**
 * @author A1793
 * @description Marketing trade service
 * @create 2025-01-11 14:01
 */
@Slf4j
@RestController()
@CrossOrigin("*")
@RequestMapping("/api/v1/gbm/trade/")
public class MarketTradeController implements IMarketTradeService {
@Resource
private IIndexGroupBuyMarketService indexGroupBuyMarketService;
@Resource
private ITradeLockOrderService tradeOrderService;
@Resource
private ITradeSettlementOrderService tradeSettlementOrderService;
@Resource
private ITradeRefundOrderService tradeRefundOrderService;
/**
 * Group-buy marketing lock order.
 * Validate parameters -> check whether this userId/outTradeNo already has an unpaid lock record -> check the team's progress -> run the discount trial for source/channel, goodsId and activityId -> lock the order
 */
@RequestMapping(value = "lock_market_pay_order", method = RequestMethod.POST)
@Override
public Response<LockMarketPayOrderResponseDTO> lockMarketPayOrder(@RequestBody LockMarketPayOrderRequestDTO requestDTO) {
try {
// Parameters
String userId = requestDTO.getUserId();
String source = requestDTO.getSource();
String channel = requestDTO.getChannel();
String goodsId = requestDTO.getGoodsId();
Long activityId = requestDTO.getActivityId();
String outTradeNo = requestDTO.getOutTradeNo();
String teamId = requestDTO.getTeamId();
LockMarketPayOrderRequestDTO.NotifyConfigVO notifyConfigVO = requestDTO.getNotifyConfigVO();
log.info("营销交易锁单:{} LockMarketPayOrderRequestDTO:{}", userId, JSON.toJSONString(requestDTO));
//后端对前端的数据要进行检验
if (StringUtils.isBlank(userId) || StringUtils.isBlank(source) || StringUtils.isBlank(channel) || StringUtils.isBlank(goodsId) || null == activityId || ("HTTP".equals(notifyConfigVO.getNotifyType()) && StringUtils.isBlank(notifyConfigVO.getNotifyUrl()))) {
return Response.<LockMarketPayOrderResponseDTO>builder()
.code(ResponseCode.ILLEGAL_PARAMETER.getCode())
.info(ResponseCode.ILLEGAL_PARAMETER.getInfo())
.build();
}
// Check whether a trade record already exists for this outTradeNo
// Query the group_buy_order_list table for an unpaid group-buy detail order for this external trade number and user ID
MarketPayOrderEntity marketPayOrderEntity =
tradeOrderService.queryNoPayMarketPayOrderByOutTradeNo(userId, outTradeNo);
if (null != marketPayOrderEntity &&
TradeOrderStatusEnumVO.CREATE.equals(marketPayOrderEntity.getTradeOrderStatusEnumVO())) {
LockMarketPayOrderResponseDTO lockMarketPayOrderResponseDTO = LockMarketPayOrderResponseDTO.builder()
.orderId(marketPayOrderEntity.getOrderId())
.originalPrice(marketPayOrderEntity.getOriginalPrice())
.deductionPrice(marketPayOrderEntity.getDeductionPrice())
.payPrice(marketPayOrderEntity.getPayPrice())
.tradeOrderStatus(marketPayOrderEntity.getTradeOrderStatusEnumVO().getCode())
.build();
log.info("交易锁单记录(存在):{} marketPayOrderEntity:{}", userId, JSON.toJSONString(marketPayOrderEntity));
return Response.<LockMarketPayOrderResponseDTO>builder()
.code(ResponseCode.SUCCESS.getCode())
.info(ResponseCode.SUCCESS.getInfo())
.data(lockMarketPayOrderResponseDTO)
.build();
}
// Check whether the team's lock target has been reached; when initiating a new group buy the teamId is empty, so no progress query is needed
if (StringUtils.isNotBlank(teamId)) {
// Query the team's group-buy progress
GroupBuyProgressVO groupBuyProgressVO = tradeOrderService.queryGroupBuyProgress(teamId);
if (null != groupBuyProgressVO &&
Objects.equals(groupBuyProgressVO.getTargetCount(), groupBuyProgressVO.getLockCount())) {
log.info("交易锁单拦截-拼单目标已达成:{} {}", userId, teamId);
return Response.<LockMarketPayOrderResponseDTO>builder()
.code(ResponseCode.E0006.getCode())
.info(ResponseCode.E0006.getInfo())
.build();
}
}
// Marketing discount trial calculation
TrialBalanceEntity trialBalanceEntity =
indexGroupBuyMarketService.indexMarketTrial(MarketProductEntity.builder()
.userId(userId)
.source(source)
.channel(channel)
.goodsId(goodsId)
.activityId(activityId)
.build());
// Crowd restriction
if (!trialBalanceEntity.getIsVisible() || !trialBalanceEntity.getIsEnable()) {
return Response.<LockMarketPayOrderResponseDTO>builder()
.code(ResponseCode.E0007.getCode())
.info(ResponseCode.E0007.getInfo())
.build();
}
GroupBuyActivityDiscountVO groupBuyActivityDiscountVO =
trialBalanceEntity.getGroupBuyActivityDiscountVO();
// Marketing discount lock order. The teamId comes from the lock request DTO: if it is empty this is a first order (initiating a group buy),
// otherwise the user is joining an existing team and must first grab a team slot in Redis; the initiator does not need to.
// outTradeNo: lock request DTO -> group-buy detail order
// notifyConfigVO: callback config from the lock request DTO; the calling application supplies the post-lock business flow (i.e. the callback address)
marketPayOrderEntity = tradeOrderService.lockMarketPayOrder(
UserEntity.builder().userId(userId).build(),
PayActivityEntity.builder()
.teamId(teamId)
.activityId(activityId)
.activityName(groupBuyActivityDiscountVO.getActivityName())
.startTime(groupBuyActivityDiscountVO.getStartTime())
.endTime(groupBuyActivityDiscountVO.getEndTime())
.validTime(groupBuyActivityDiscountVO.getValidTime())
.targetCount(groupBuyActivityDiscountVO.getTarget())
.build(),
PayDiscountEntity.builder()
.source(source)
.channel(channel)
.goodsId(goodsId)
.goodsName(trialBalanceEntity.getGoodsName())
.originalPrice(trialBalanceEntity.getOriginalPrice())
.deductionPrice(trialBalanceEntity.getDeductionPrice())
.payPrice(trialBalanceEntity.getPayPrice())
.outTradeNo(outTradeNo)
.notifyConfigVO(
// Build the callback notify config
NotifyConfigVO.builder()
.notifyType(NotifyTypeEnumVO.valueOf(notifyConfigVO.getNotifyType()))
.notifyMQ(notifyConfigVO.getNotifyMQ())
.notifyUrl(notifyConfigVO.getNotifyUrl())
.build())
.build());
log.info("交易锁单记录(新):{} marketPayOrderEntity:{}", userId, JSON.toJSONString(marketPayOrderEntity));
// 返回结果
return Response.<LockMarketPayOrderResponseDTO>builder()
.code(ResponseCode.SUCCESS.getCode())
.info(ResponseCode.SUCCESS.getInfo())
.data(LockMarketPayOrderResponseDTO.builder()
.orderId(marketPayOrderEntity.getOrderId())
.originalPrice(marketPayOrderEntity.getOriginalPrice())
.deductionPrice(marketPayOrderEntity.getDeductionPrice())
.payPrice(marketPayOrderEntity.getPayPrice())
.tradeOrderStatus(marketPayOrderEntity.getTradeOrderStatusEnumVO().getCode())
.teamId(marketPayOrderEntity.getTeamId())
.build())
.build();
} catch (AppException e) {
log.error("营销交易锁单业务异常:{} LockMarketPayOrderRequestDTO:{}", requestDTO.getUserId(), JSON.toJSONString(requestDTO), e);
return Response.<LockMarketPayOrderResponseDTO>builder()
.code(e.getCode())
.info(e.getInfo())
.build();
} catch (Exception e) {
log.error("营销交易锁单服务失败:{} LockMarketPayOrderRequestDTO:{}", requestDTO.getUserId(), JSON.toJSONString(requestDTO), e);
return Response.<LockMarketPayOrderResponseDTO>builder()
.code(ResponseCode.UN_ERROR.getCode())
.info(ResponseCode.UN_ERROR.getInfo())
.build();
}
}
@RequestMapping(value = "settlement_market_pay_order", method = RequestMethod.POST)
@Override
public Response<SettlementMarketPayOrderResponseDTO> settlementMarketPayOrder(@RequestBody SettlementMarketPayOrderRequestDTO requestDTO) {
try {
log.info("营销交易组队结算开始:{} outTradeNo:{}", requestDTO.getUserId(), requestDTO.getOutTradeNo());
if (StringUtils.isBlank(requestDTO.getUserId()) ||
StringUtils.isBlank(requestDTO.getSource()) ||
StringUtils.isBlank(requestDTO.getChannel()) ||
StringUtils.isBlank(requestDTO.getOutTradeNo()) ||
null == requestDTO.getOutTradeTime()) {
return Response.<SettlementMarketPayOrderResponseDTO>builder()
.code(ResponseCode.ILLEGAL_PARAMETER.getCode())
.info(ResponseCode.ILLEGAL_PARAMETER.getInfo())
.build();
}
// 1. Settlement service
TradePaySettlementEntity tradePaySettlementEntity =
tradeSettlementOrderService.settlementMarketPayOrder(TradePaySuccessEntity.builder()
.source(requestDTO.getSource())
.channel(requestDTO.getChannel())
.userId(requestDTO.getUserId())
.outTradeNo(requestDTO.getOutTradeNo())
.outTradeTime(requestDTO.getOutTradeTime())
.build());
SettlementMarketPayOrderResponseDTO responseDTO =
SettlementMarketPayOrderResponseDTO.builder()
.userId(tradePaySettlementEntity.getUserId())
.teamId(tradePaySettlementEntity.getTeamId())
.activityId(tradePaySettlementEntity.getActivityId())
.outTradeNo(tradePaySettlementEntity.getOutTradeNo())
.build();
// Return result
Response<SettlementMarketPayOrderResponseDTO> response =
Response.<SettlementMarketPayOrderResponseDTO>builder()
.code(ResponseCode.SUCCESS.getCode())
.info(ResponseCode.SUCCESS.getInfo())
.data(responseDTO)
.build();
log.info("营销交易组队结算完成:{} outTradeNo:{} response:{}", requestDTO.getUserId(), requestDTO.getOutTradeNo(), JSON.toJSONString(response));
return response;
} catch (AppException e) {
log.error("营销交易组队结算异常:{} LockMarketPayOrderRequestDTO:{}", requestDTO.getUserId(), JSON.toJSONString(requestDTO), e);
return Response.<SettlementMarketPayOrderResponseDTO>builder()
.code(e.getCode())
.info(e.getInfo())
.build();
} catch (Exception e) {
log.error("营销交易组队结算失败:{} LockMarketPayOrderRequestDTO:{}", requestDTO.getUserId(), JSON.toJSONString(requestDTO), e);
return Response.<SettlementMarketPayOrderResponseDTO>builder()
.code(ResponseCode.UN_ERROR.getCode())
.info(ResponseCode.UN_ERROR.getInfo())
.build();
}
}
@RequestMapping(value = "refund_market_pay_order", method = RequestMethod.POST)
@Override
public Response<RefundMarketPayOrderResponseDTO> refundMarketPayOrder(@RequestBody RefundMarketPayOrderRequestDTO requestDTO) {
try {
log.info("营销拼团退单开始:{} outTradeNo:{}", requestDTO.getUserId(), requestDTO.getOutTradeNo());
if (StringUtils.isBlank(requestDTO.getUserId()) ||
StringUtils.isBlank(requestDTO.getOutTradeNo()) ||
StringUtils.isBlank(requestDTO.getSource()) ||
StringUtils.isBlank(requestDTO.getChannel())) {
return Response.<RefundMarketPayOrderResponseDTO>builder()
.code(ResponseCode.ILLEGAL_PARAMETER.getCode())
.info(ResponseCode.ILLEGAL_PARAMETER.getInfo())
.build();
}
// 1. Refund service
TradeRefundBehaviorEntity tradeRefundBehaviorEntity =
tradeRefundOrderService.refundOrder(TradeRefundCommandEntity.builder()
.userId(requestDTO.getUserId())
.outTradeNo(requestDTO.getOutTradeNo())
.source(requestDTO.getSource())
.channel(requestDTO.getChannel())
.build());
RefundMarketPayOrderResponseDTO responseDTO = RefundMarketPayOrderResponseDTO.builder()
.userId(tradeRefundBehaviorEntity.getUserId())
.orderId(tradeRefundBehaviorEntity.getOrderId())
.teamId(tradeRefundBehaviorEntity.getTeamId())
.code(tradeRefundBehaviorEntity.getTradeRefundBehaviorEnum().getCode())
.info(tradeRefundBehaviorEntity.getTradeRefundBehaviorEnum().getInfo())
.build();
// Return result
Response<RefundMarketPayOrderResponseDTO> response = Response.<RefundMarketPayOrderResponseDTO>builder()
.code(ResponseCode.SUCCESS.getCode())
.info(ResponseCode.SUCCESS.getInfo())
.data(responseDTO)
.build();
log.info("营销拼团退单完成:{} outTradeNo:{} response:{}", requestDTO.getUserId(), requestDTO.getOutTradeNo(), JSON.toJSONString(response));
return response;
} catch (AppException e) {
log.error("营销拼团退单异常:{} RefundMarketPayOrderRequestDTO:{}", requestDTO.getUserId(), JSON.toJSONString(requestDTO), e);
return Response.<RefundMarketPayOrderResponseDTO>builder()
.code(e.getCode())
.info(e.getInfo())
.build();
} catch (Exception e) {
log.error("营销拼团退单失败:{} RefundMarketPayOrderRequestDTO:{}", requestDTO.getUserId(), JSON.toJSONString(requestDTO), e);
return Response.<RefundMarketPayOrderResponseDTO>builder()
.code(ResponseCode.UN_ERROR.getCode())
.info(ResponseCode.UN_ERROR.getInfo())
.build();
}
}
}
// source: group-buy-market-z-trigger/src/main/java/zack/project/trigger/http/MarketTradeController.java
package zack.project.trigger.http;
import zack.project.api.dto.NotifyRequestDTO;
import com.alibaba.fastjson2.JSON;
import lombok.extern.slf4j.Slf4j;
import org.springframework.web.bind.annotation.*;
/**
 * @author zack
 * @description Callback service test endpoint
 * @create 2025-01-31 08:59
 */
@Slf4j
@RestController()
@CrossOrigin("*")
@RequestMapping("/api/v1/test/")
public class TestApiClientController {
/**
 * Simulated callback endpoint
 *
 * @param notifyRequestDTO notify callback parameters
 * @return "success" on success, "error" on failure
 */
@RequestMapping(value = "group_buy_notify", method = RequestMethod.POST)
public String groupBuyNotify(@RequestBody NotifyRequestDTO notifyRequestDTO) {
log.info("Simulated third-party service received group-buy callback {}", JSON.toJSONString(notifyRequestDTO));
return "success";
}
}
// source: group-buy-market-z-trigger/src/main/java/zack/project/trigger/http/TestApiClientController.java
/**
 * HTTP interface services
 */
package zack.project.trigger.http;
// source: group-buy-market-z-trigger/src/main/java/zack/project/trigger/http/package-info.java
package zack.project.trigger.job;
import zack.project.domain.trade.service.ITradeTaskService;
import com.alibaba.fastjson.JSON;
import lombok.extern.slf4j.Slf4j;
import org.redisson.api.RLock;
import org.redisson.api.RedissonClient;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Service;
import javax.annotation.Resource;
import java.util.Map;
import java.util.concurrent.TimeUnit;
/**
 * @description Group-buy completion callback notify job. In a real production setting the callback task table is
 * periodically archived, so not much data piles up.
 * @create 2025-01-31 10:27
 */
@Slf4j
@Service
public class GroupBuyNotifyJob {
@Resource
private ITradeTaskService tradeTaskService;
@Resource
private RedissonClient redissonClient;
@Scheduled(cron = "0 0 0 * * ?")
public void exec() {
// Why take a lock?
// With N instances deployed for redundancy (if one dies, others remain), the schedule fires on all N at once,
// so a contention step is needed: whoever grabs the lock runs this round, and the next round is contended again.
RLock lock = redissonClient.getLock("group_buy_market_notify_job_exec");
try {
// waitTime: the maximum time to wait for the lock
// leaseTime: the lease period, i.e. how long the lock is held once acquired; after that it is released
// automatically. Size the lease to the method's maximum execution time, e.g. 50 ms.
boolean isLocked = lock.tryLock(3, 0, TimeUnit.SECONDS);
if (!isLocked) return;
// Query all callback tasks from the database (newly created and pending retry)
Map<String, Integer> result = tradeTaskService.execNotifyJob();
log.info("Scheduled job, callback notification complete result:{}", JSON.toJSONString(result));
} catch (Exception e) {
log.error("Scheduled job, callback notification failed", e);
} finally {
if (lock.isLocked() && lock.isHeldByCurrentThread()) {
lock.unlock();
}
}
}
}
// source: group-buy-market-z-trigger/src/main/java/zack/project/trigger/job/GroupBuyNotifyJob.java
package zack.project.trigger.job;
import zack.project.domain.activity.model.entity.UserGroupBuyOrderDetailEntity;
import zack.project.domain.trade.model.entity.TradeRefundCommandEntity;
import zack.project.domain.trade.service.ITradeRefundOrderService;
import lombok.extern.slf4j.Slf4j;
import org.redisson.api.RLock;
import org.redisson.api.RedissonClient;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Service;
import javax.annotation.Resource;
import java.util.List;
import java.util.concurrent.TimeUnit;
/**
 * @description Scheduled job that refunds orders left unpaid past their timeout
 * @create 2025-01-31 15:00
 */
@Slf4j
@Service
public class TimeoutRefundJob {
@Resource
private ITradeRefundOrderService tradeRefundOrderService;
@Resource
private RedissonClient redissonClient;
/**
 * Scan for timeout unpaid orders every minute
 */
@Scheduled(cron = "0 */1 * * * ?")
public void exec() {
// Distributed lock to prevent duplicate execution across instances
RLock lock = redissonClient.getLock("group_buy_market_timeout_refund_job_exec");
try {
// waitTime: the maximum time to wait for the lock
// leaseTime: the lease period, i.e. how long the lock is held
boolean isLocked = lock.tryLock(3, 60, TimeUnit.SECONDS);
if (!isLocked) {
log.info("Timeout refund job, failed to acquire lock, skipping this run");
return;
}
log.info("Timeout refund job started");
// Query the list of orders left unpaid past their timeout
List<UserGroupBuyOrderDetailEntity> timeoutOrderList =
tradeRefundOrderService.queryTimeoutUnpaidOrderList();
if (timeoutOrderList == null || timeoutOrderList.isEmpty()) {
log.info("Timeout refund job, no timeout unpaid orders found");
return;
}
log.info("Timeout refund job, timeout unpaid orders found:{}", timeoutOrderList.size());
int successCount = 0;
int failCount = 0;
// Process each timeout order
for (UserGroupBuyOrderDetailEntity orderDetail : timeoutOrderList) {
try {
// Build the refund command
TradeRefundCommandEntity refundCommand = TradeRefundCommandEntity.builder()
.userId(orderDetail.getUserId())
.outTradeNo(orderDetail.getOutTradeNo())
.source(orderDetail.getSource())
.channel(orderDetail.getChannel())
.build();
// Execute the refund
tradeRefundOrderService.refundOrder(refundCommand);
successCount++;
log.info("Timeout order refund succeeded, userId:{}, outTradeNo:{}", orderDetail.getUserId(), orderDetail.getOutTradeNo());
} catch (Exception e) {
failCount++;
log.error("Timeout order refund failed, userId:{}, outTradeNo:{}, error:{}",
orderDetail.getUserId(), orderDetail.getOutTradeNo(), e.getMessage(), e);
}
}
log.info("Timeout refund job finished, success:{}, failed:{}", successCount, failCount);
} catch (Exception e) {
log.error("Timeout refund job execution exception", e);
} finally {
if (lock.isLocked() && lock.isHeldByCurrentThread()) {
lock.unlock();
}
}
}
}
// source: group-buy-market-z-trigger/src/main/java/zack/project/trigger/job/TimeoutRefundJob.java
/**
 * Task services; Spring's built-in Schedule support is one option: https://bugstack.cn/md/road-map/quartz.html
 */
package zack.project.trigger.job;
// source: group-buy-market-z-trigger/src/main/java/zack/project/trigger/job/package-info.java
package zack.project.trigger.listener;
import zack.project.domain.trade.model.valobj.TeamRefundSuccess;
import zack.project.domain.trade.service.ITradeRefundOrderService;
import com.alibaba.fastjson.JSON;
import lombok.extern.slf4j.Slf4j;
import org.springframework.amqp.core.ExchangeTypes;
import org.springframework.amqp.rabbit.annotation.Exchange;
import org.springframework.amqp.rabbit.annotation.Queue;
import org.springframework.amqp.rabbit.annotation.QueueBinding;
import org.springframework.amqp.rabbit.annotation.RabbitListener;
import org.springframework.stereotype.Component;
import javax.annotation.Resource;
/**
 * @description Refund success message listener
 * @create 2025-03-08 13:49
 */
@Slf4j
@Component
public class RefundSuccessTopicListener {
@Resource
private ITradeRefundOrderService tradeRefundOrderService;
/**
 * This flow is eventually consistent:
 * 1. After the database lock counts are restored, a local message table compensates MQ so the message is guaranteed to be sent.
 * 2. The MQ consumer restores the locked stock, taking a distributed lock so the operation is never applied twice.
 * 3. MQ retries ensure the message can be redelivered on failure; thanks to the distributed lock, duplicate consumption never double-restores the locked stock.
 */
@RabbitListener(
bindings = @QueueBinding(
value = @Queue(value = "${spring.rabbitmq.config.producer.topic_team_refund.queue}"),
exchange = @Exchange(value = "${spring.rabbitmq.config.producer.exchange}", type = ExchangeTypes.TOPIC),
key = "${spring.rabbitmq.config.producer.topic_team_refund.routing_key}"
)
)
public void listener(String message) {
log.info("Message received (refund success) - restoring team lock count:{}", message);
TeamRefundSuccess teamRefundSuccess = JSON.parseObject(message, TeamRefundSuccess.class);
try {
tradeRefundOrderService.restoreTeamLockStock(teamRefundSuccess);
} catch (Exception e) {
log.error("Message received (refund success) - failed to restore team lock count:{}", message, e);
// Throw so the MQ message is retried
throw new RuntimeException(e);
}
}
}
// source: group-buy-market-z-trigger/src/main/java/zack/project/trigger/listener/RefundSuccessTopicListener.java
package zack.project.trigger.listener;
import lombok.extern.slf4j.Slf4j;
import org.springframework.amqp.core.ExchangeTypes;
import org.springframework.amqp.rabbit.annotation.Exchange;
import org.springframework.amqp.rabbit.annotation.Queue;
import org.springframework.amqp.rabbit.annotation.QueueBinding;
import org.springframework.amqp.rabbit.annotation.RabbitListener;
import org.springframework.stereotype.Component;
/**
 * @description Team success message listener
 * @create 2025-03-08 13:49
 */
@Slf4j
@Component
public class TeamSuccessTopicListener {
@RabbitListener(
bindings = @QueueBinding(
value = @Queue(value = "${spring.rabbitmq.config.producer.topic_team_success.queue}"),
exchange = @Exchange(value = "${spring.rabbitmq.config.producer.exchange}", type = ExchangeTypes.TOPIC),
key = "${spring.rabbitmq.config.producer.topic_team_success.routing_key}"
)
)
public void listener(String message) {
log.info("接收消息(组队成功):{}", message);
}
}
// source: group-buy-market-z-trigger/src/main/java/zack/project/trigger/listener/TeamSuccessTopicListener.java
/**
 * Listener services. Within a monolith these decouple flows much like MQ; Spring Events or Guava's EventBus work,
 * and if Redis is already in use, its pub/sub is another option.
 * Guava: https://bugstack.cn/md/road-map/guava.html
 */
package zack.project.trigger.listener;
// source: group-buy-market-z-trigger/src/main/java/zack/project/trigger/listener/package-info.java
package zack.project.types.annotations;
import java.lang.annotation.*;
/**
 * @author zack.project
 * @description Annotation marking a dynamic config center value
 * @create 2025-01-03 15:06
 */
@Retention(RetentionPolicy.RUNTIME)
@Target({ElementType.FIELD})
@Documented
public @interface DCCValue {
String value() default "";
}
// source: group-buy-market-z-types/src/main/java/zack/project/types/annotations/DCCValue.java
package zack.project.types.common;
public class Constants {
public final static String SPLIT = ",";
public final static String UNDERLINE = "_";
}
// source: group-buy-market-z-types/src/main/java/zack/project/types/common/Constants.java
package zack.project.types.design.framework.link.model1;
/**
 * @description Abstract chain link base class
 * @create 2025-01-18 09:14
 */
public abstract class AbstractLogicLink<T, D, R> implements ILogicLink<T, D, R> {
private ILogicLink<T, D, R> next;
@Override
public ILogicLink<T, D, R> next() {
return next;
}
@Override
public ILogicLink<T, D, R> appendNext(ILogicLink<T, D, R> next) {
this.next = next;
return next;
}
protected R next(T requestParameter, D dynamicContext) throws Exception {
return next.apply(requestParameter, dynamicContext);
}
}
// source: group-buy-market-z-types/src/main/java/zack/project/types/design/framework/link/model1/AbstractLogicLink.java
package zack.project.types.design.framework.link.model1;
/**
 * @author zack.project
 * @description Chain-of-responsibility assembly
 * @create 2025-01-18 09:10
 */
public interface ILogicChainArmory<T, D, R> {
ILogicLink<T, D, R> next();
ILogicLink<T, D, R> appendNext(ILogicLink<T, D, R> next);
}
|
2301_82000044/group-buy-market-z
|
group-buy-market-z-types/src/main/java/zack/project/types/design/framework/link/model1/ILogicChainArmory.java
|
Java
|
unknown
| 295
|
package zack.project.types.design.framework.link.model1;
/**
* @author zack.project
 * @description Strategy-rule chain-of-responsibility interface
* @create 2025-01-18 09:09
*/
public interface ILogicLink<T, D, R> extends ILogicChainArmory<T, D, R> {
R apply(T requestParameter, D dynamicContext) throws Exception;
}
|
2301_82000044/group-buy-market-z
|
group-buy-market-z-types/src/main/java/zack/project/types/design/framework/link/model1/ILogicLink.java
|
Java
|
unknown
| 305
|
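A minimal usage sketch of the model1 (single-instance) chain defined above: each node either answers or delegates to its successor via next(...). The two rule nodes are hypothetical:

import zack.project.types.design.framework.link.model1.AbstractLogicLink;
import zack.project.types.design.framework.link.model1.ILogicLink;

// Hypothetical node that always delegates to the next link
class RuleLogic101 extends AbstractLogicLink<String, String, String> {
    @Override
    public String apply(String request, String context) throws Exception {
        return next(request, context);
    }
}

// Hypothetical terminal node that produces the result
class RuleLogic102 extends AbstractLogicLink<String, String, String> {
    @Override
    public String apply(String request, String context) throws Exception {
        return "handled by 102";
    }
}

class Model1Demo {
    public static void main(String[] args) throws Exception {
        ILogicLink<String, String, String> chain = new RuleLogic101();
        chain.appendNext(new RuleLogic102());
        System.out.println(chain.apply("request", "context")); // handled by 102
    }
}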
package zack.project.types.design.framework.link.model2;
import zack.project.types.design.framework.link.model2.chain.BusinessLinkedList;
import zack.project.types.design.framework.link.model2.handler.ILogicHandler;
/**
* @author zack.project
 * @description Chain assembly
* @create 2025-01-18 10:02
*/
public class LinkArmory<T, D, R> {
private final BusinessLinkedList<T, D, R> logicLink;
@SafeVarargs
public LinkArmory(String linkName, ILogicHandler<T, D, R>... logicHandlers) {
logicLink = new BusinessLinkedList<>(linkName);
for (ILogicHandler<T, D, R> logicHandler: logicHandlers){
logicLink.add(logicHandler);
}
}
public BusinessLinkedList<T, D, R> getLogicLink() {
return logicLink;
}
}
|
2301_82000044/group-buy-market-z
|
group-buy-market-z-types/src/main/java/zack/project/types/design/framework/link/model2/LinkArmory.java
|
Java
|
unknown
| 771
|
package zack.project.types.design.framework.link.model2.chain;
import zack.project.types.design.framework.link.model2.handler.ILogicHandler;
/**
 * @description Business chain: applies handlers in order until one returns a non-null result
* @create 2025-01-18 10:27
*/
public class BusinessLinkedList<T, D, R> extends LinkedList<ILogicHandler<T, D, R>> implements ILogicHandler<T, D, R>{
public BusinessLinkedList(String name) {
super(name);
}
@Override
public R apply(T requestParameter, D dynamicContext) throws Exception {
        Node<ILogicHandler<T, D, R>> current = this.first;
        // Guard against an empty chain; a do-while would dereference a null first node
        while (null != current) {
            ILogicHandler<T, D, R> item = current.item;
            R apply = item.apply(requestParameter, dynamicContext);
            if (null != apply) return apply;
            current = current.next;
        }
return null;
}
}
|
2301_82000044/group-buy-market-z
|
group-buy-market-z-types/src/main/java/zack/project/types/design/framework/link/model2/chain/BusinessLinkedList.java
|
Java
|
unknown
| 835
|
package zack.project.types.design.framework.link.model2.chain;
/**
* @author zack.project
 * @description Chain interface
* @create 2025-01-18 09:27
*/
public interface ILink<E> {
boolean add(E e);
boolean addFirst(E e);
boolean addLast(E e);
boolean remove(Object o);
E get(int index);
void printLinkList();
}
|
2301_82000044/group-buy-market-z
|
group-buy-market-z-types/src/main/java/zack/project/types/design/framework/link/model2/chain/ILink.java
|
Java
|
unknown
| 340
|
package zack.project.types.design.framework.link.model2.chain;
/**
 * @description Doubly linked list backing the chains
* @create 2025-01-18 09:31
*/
public class LinkedList<E> implements ILink<E> {
/**
     * Chain-of-responsibility name
*/
private final String name;
transient int size = 0;
transient Node<E> first;
transient Node<E> last;
public LinkedList(String name) {
this.name = name;
}
void linkFirst(E e) {
final Node<E> f = first;
final Node<E> newNode = new Node<>(null, e, f);
first = newNode;
if (f == null)
last = newNode;
else
f.prev = newNode;
size++;
}
void linkLast(E e) {
final Node<E> l = last;
final Node<E> newNode = new Node<>(l, e, null);
last = newNode;
if (l == null) {
first = newNode;
} else {
l.next = newNode;
}
size++;
}
@Override
public boolean add(E e) {
linkLast(e);
return true;
}
@Override
public boolean addFirst(E e) {
linkFirst(e);
return true;
}
@Override
public boolean addLast(E e) {
linkLast(e);
return true;
}
@Override
public boolean remove(Object o) {
if (o == null) {
for (Node<E> x = first; x != null; x = x.next) {
if (x.item == null) {
unlink(x);
return true;
}
}
} else {
for (Node<E> x = first; x != null; x = x.next) {
if (o.equals(x.item)) {
unlink(x);
return true;
}
}
}
return false;
}
E unlink(Node<E> x) {
final E element = x.item;
final Node<E> next = x.next;
final Node<E> prev = x.prev;
if (prev == null) {
first = next;
} else {
prev.next = next;
x.prev = null;
}
if (next == null) {
last = prev;
} else {
next.prev = prev;
x.next = null;
}
x.item = null;
size--;
return element;
}
@Override
public E get(int index) {
return node(index).item;
}
Node<E> node(int index) {
if (index < (size >> 1)) {
Node<E> x = first;
for (int i = 0; i < index; i++)
x = x.next;
return x;
} else {
Node<E> x = last;
for (int i = size - 1; i > index; i--)
x = x.prev;
return x;
}
}
public void printLinkList() {
if (this.size == 0) {
System.out.println("链表为空");
} else {
Node<E> temp = first;
System.out.print("目前的列表,头节点:" + first.item + " 尾节点:" + last.item + " 整体:");
while (temp != null) {
System.out.print(temp.item + ",");
temp = temp.next;
}
System.out.println();
}
}
protected static class Node<E> {
E item;
Node<E> next;
Node<E> prev;
public Node(Node<E> prev, E element, Node<E> next) {
this.item = element;
this.next = next;
this.prev = prev;
}
}
public String getName() {
return name;
}
}
|
2301_82000044/group-buy-market-z
|
group-buy-market-z-types/src/main/java/zack/project/types/design/framework/link/model2/chain/LinkedList.java
|
Java
|
unknown
| 3,474
|
package zack.project.types.design.framework.link.model2.handler;
/**
 * @description Logic handler; return a non-null result to stop the chain, or null to fall through to the next handler
* @create 2025-01-18 09:43
*/
public interface ILogicHandler<T, D, R> {
default R next(T requestParameter, D dynamicContext) {
return null;
}
R apply(T requestParameter, D dynamicContext) throws Exception;
}
|
2301_82000044/group-buy-market-z
|
group-buy-market-z-types/src/main/java/zack/project/types/design/framework/link/model2/handler/ILogicHandler.java
|
Java
|
unknown
| 335
|
/**
 * model1 is a single-instance chain: each node holds its successor directly
 * model2 is a multi-instance chain: handlers are collected into a reusable linked list
 *
 * @description General-purpose design template: chain of responsibility
* @author zack.project
* @create 2024-12-14 12:04
*/
package zack.project.types.design.framework.link;
|
2301_82000044/group-buy-market-z
|
group-buy-market-z-types/src/main/java/zack/project/types/design/framework/link/package-info.java
|
Java
|
unknown
| 211
|
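A minimal usage sketch of the model2 (multi-instance) chain: LinkArmory collects handlers into a BusinessLinkedList, which runs them in order until one returns a non-null result. The handlers here are hypothetical lambdas:

import zack.project.types.design.framework.link.model2.LinkArmory;
import zack.project.types.design.framework.link.model2.chain.BusinessLinkedList;
import zack.project.types.design.framework.link.model2.handler.ILogicHandler;

class Model2Demo {
    public static void main(String[] args) throws Exception {
        // Returning null falls through to the next handler
        ILogicHandler<String, String, String> blacklist = (request, context) -> null;
        // Returning a non-null value stops the chain
        ILogicHandler<String, String, String> settle = (request, context) -> "settled:" + request;

        LinkArmory<String, String, String> armory = new LinkArmory<>("demo_chain", blacklist, settle);
        BusinessLinkedList<String, String, String> chain = armory.getLogicLink();
        System.out.println(chain.apply("order-1", "ctx")); // settled:order-1
    }
}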
package zack.project.types.design.framework.tree;
import lombok.Getter;
import lombok.Setter;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeoutException;
/**
* @author zack.project
 * @description Strategy router with asynchronous resource loading
* @create 2024-12-21 08:48
*/
public abstract class AbstractMultiThreadStrategyRouter<T, D, R> implements StrategyMapper<T, D, R>, StrategyHandler<T, D, R> {
@Getter
@Setter
protected StrategyHandler<T, D, R> defaultStrategyHandler = StrategyHandler.DEFAULT;
public R router(T requestParameter, D dynamicContext) throws Exception {
StrategyHandler<T, D, R> strategyHandler = get(requestParameter, dynamicContext);
if(null != strategyHandler) return strategyHandler.apply(requestParameter, dynamicContext);
return defaultStrategyHandler.apply(requestParameter, dynamicContext);
}
@Override
public R apply(T requestParameter, D dynamicContext) throws Exception {
        // Load data asynchronously
multiThread(requestParameter, dynamicContext);
        // Handle the business flow
return doApply(requestParameter, dynamicContext);
}
/**
     * Load data asynchronously
*/
protected abstract void multiThread(T requestParameter, D dynamicContext) throws ExecutionException, InterruptedException, TimeoutException;
/**
     * Handle the business flow
*/
protected abstract R doApply(T requestParameter, D dynamicContext) throws Exception;
}
|
2301_82000044/group-buy-market-z
|
group-buy-market-z-types/src/main/java/zack/project/types/design/framework/tree/AbstractMultiThreadStrategyRouter.java
|
Java
|
unknown
| 1,480
|
package zack.project.types.design.framework.tree;
import lombok.Getter;
import lombok.Setter;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeoutException;
/**
 * @description Strategy router with asynchronous resource loading
 */
public abstract class AbstractMutliThreadStrategyRouter<T, D, R> implements StrategyHandler<T, D, R>, StrategyMapper<T, D, R> {
    @Getter
    @Setter
    protected StrategyHandler<T, D, R> defaultStrategyHandler = StrategyHandler.DEFAULT;
    public R router(T requestParameter, D dynamicContext) throws Exception {
        StrategyHandler<T, D, R> strategyHandler = get(requestParameter, dynamicContext);
        if (null != strategyHandler) return strategyHandler.apply(requestParameter, dynamicContext);
        return defaultStrategyHandler.apply(requestParameter, dynamicContext);
    }
    @Override
    public R apply(T requestParameter, D dynamicContext) throws Exception {
        // Load data asynchronously, then handle the business flow
        mutliThread(requestParameter, dynamicContext);
        return doApply(requestParameter, dynamicContext);
    }
    protected abstract void mutliThread(T requestParameter, D dynamicContext) throws ExecutionException, InterruptedException, TimeoutException;
    protected abstract R doApply(T requestParameter, D dynamicContext) throws Exception;
}
|
2301_82000044/group-buy-market-z
|
group-buy-market-z-types/src/main/java/zack/project/types/design/framework/tree/AbstractMutliThreadStrategyRouter.java
|
Java
|
unknown
| 1,236
|
package zack.project.types.design.framework.tree;
import lombok.Getter;
import lombok.Setter;
/**
* @author zack.project
 * @description Abstract strategy router
* @create 2024-12-14 13:25
*/
public abstract class AbstractStrategyRouter<T, D, R> implements StrategyMapper<T, D, R>, StrategyHandler<T, D, R> {
@Getter
@Setter
protected StrategyHandler<T, D, R> defaultStrategyHandler = StrategyHandler.DEFAULT;
public R router(T requestParameter, D dynamicContext) throws Exception {
StrategyHandler<T, D, R> strategyHandler = get(requestParameter, dynamicContext);
if(null != strategyHandler) return strategyHandler.apply(requestParameter, dynamicContext);
return defaultStrategyHandler.apply(requestParameter, dynamicContext);
}
}
|
2301_82000044/group-buy-market-z
|
group-buy-market-z-types/src/main/java/zack/project/types/design/framework/tree/AbstractStrategyRouter.java
|
Java
|
unknown
| 781
|
package zack.project.types.design.framework.tree;
/**
* @author zack.project
 * @description Strategy handler
 * T request parameter type
 * D dynamic context type
 * R result type
* @create 2024-12-14 12:06
*/
public interface StrategyHandler<T, D, R> {
    StrategyHandler DEFAULT = (requestParameter, dynamicContext) -> null;
R apply(T requestParameter, D dynamicContext) throws Exception;
}
|
2301_82000044/group-buy-market-z
|
group-buy-market-z-types/src/main/java/zack/project/types/design/framework/tree/StrategyHandler.java
|
Java
|
unknown
| 365
|
package zack.project.types.design.framework.tree;
/**
* @author zack.project
 * @description Strategy mapper
 * T request parameter type
 * D dynamic context type
 * R result type
* @create 2024-12-14 12:05
*/
public interface StrategyMapper<T, D, R> {
/**
     * Get the strategy handler to execute
     *
     * @param requestParameter request parameter
     * @param dynamicContext dynamic context
     * @return the matched strategy handler, or null to fall back to the default
     * @throws Exception on lookup failure
*/
StrategyHandler<T, D, R> get(T requestParameter, D dynamicContext) throws Exception;
}
|
2301_82000044/group-buy-market-z
|
group-buy-market-z-types/src/main/java/zack/project/types/design/framework/tree/StrategyMapper.java
|
Java
|
unknown
| 521
|
/**
 * @description General-purpose design template: rule tree
* @author zack.project
* @create 2024-12-14 12:02
*/
package zack.project.types.design.framework.tree;
|
2301_82000044/group-buy-market-z
|
group-buy-market-z-types/src/main/java/zack/project/types/design/framework/tree/package-info.java
|
Java
|
unknown
| 156
|
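A minimal sketch tying the rule-tree types above together: a router node preloads data asynchronously in multiThread, then routes to a handler chosen by get. The node, context shape, and values here are hypothetical:

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import zack.project.types.design.framework.tree.AbstractMultiThreadStrategyRouter;
import zack.project.types.design.framework.tree.StrategyHandler;

// Hypothetical root node; Map<String, Object> stands in for a real dynamic context
class RootNode extends AbstractMultiThreadStrategyRouter<String, Map<String, Object>, String> {
    @Override
    protected void multiThread(String request, Map<String, Object> context)
            throws ExecutionException, InterruptedException, TimeoutException {
        // Preload data asynchronously and stash it in the context
        CompletableFuture<String> activity = CompletableFuture.supplyAsync(() -> "activity-config");
        context.put("activity", activity.get(3, TimeUnit.SECONDS));
    }

    @Override
    protected String doApply(String request, Map<String, Object> context) throws Exception {
        return router(request, context); // route onward, or fall back to the DEFAULT handler
    }

    @Override
    public StrategyHandler<String, Map<String, Object>, String> get(String request, Map<String, Object> context) {
        return (req, ctx) -> "done:" + ctx.get("activity");
    }
}

class TreeDemo {
    public static void main(String[] args) throws Exception {
        System.out.println(new RootNode().apply("req", new HashMap<>())); // done:activity-config
    }
}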
package zack.project.types.enums;
import lombok.AllArgsConstructor;
import lombok.Getter;
import lombok.NoArgsConstructor;
/**
 * @description Group-buy activity status enum
* @create 2025-01-25 12:29
*/
@Getter
@AllArgsConstructor
@NoArgsConstructor
public enum ActivityStatusEnumVO {
CREATE(0, "创建"),
EFFECTIVE(1, "生效"),
OVERDUE(2, "过期"),
ABANDONED(3, "废弃"),
;
private Integer code;
private String info;
public static ActivityStatusEnumVO valueOf(Integer code) {
switch (code) {
case 0:
return CREATE;
case 1:
return EFFECTIVE;
case 2:
return OVERDUE;
case 3:
return ABANDONED;
}
throw new RuntimeException("err code not exist!");
}
}
|
2301_82000044/group-buy-market-z
|
group-buy-market-z-types/src/main/java/zack/project/types/enums/ActivityStatusEnumVO.java
|
Java
|
unknown
| 827
|
package zack.project.types.enums;
import lombok.AllArgsConstructor;
import lombok.Getter;
import lombok.NoArgsConstructor;
/**
 * @description Group-buy team status enum
* @create 2025-01-26 16:21
*/
@Getter
@AllArgsConstructor
@NoArgsConstructor
public enum GroupBuyOrderEnumVO {
PROGRESS(0, "拼单中"),
COMPLETE(1, "完成"),
FAIL(2, "失败"),
COMPLETE_FAIL(3, "完成-含退单"),
;
private Integer code;
private String info;
public static GroupBuyOrderEnumVO valueOf(Integer code) {
switch (code) {
case 0:
return PROGRESS;
case 1:
return COMPLETE;
case 2:
return FAIL;
case 3:
return COMPLETE_FAIL;
}
throw new RuntimeException("err code not exist!");
}
}
|
2301_82000044/group-buy-market-z
|
group-buy-market-z-types/src/main/java/zack/project/types/enums/GroupBuyOrderEnumVO.java
|
Java
|
unknown
| 842
|
package zack.project.types.enums;
import lombok.AllArgsConstructor;
import lombok.Getter;
import lombok.NoArgsConstructor;
/**
 * @description Callback task status enum
* @create 2025-01-31 13:44
*/
@Getter
@AllArgsConstructor
@NoArgsConstructor
public enum NotifyTaskHTTPEnumVO {
SUCCESS("success", "成功"),
ERROR("error", "失败"),
NULL(null, "空执行"),
;
private String code;
private String info;
}
|
2301_82000044/group-buy-market-z
|
group-buy-market-z-types/src/main/java/zack/project/types/enums/NotifyTaskHTTPEnumVO.java
|
Java
|
unknown
| 432
|
package zack.project.types.enums;
import lombok.AllArgsConstructor;
import lombok.Getter;
import lombok.NoArgsConstructor;
@AllArgsConstructor
@NoArgsConstructor
@Getter
public enum ResponseCode {
SUCCESS("0000", "成功"),
UN_ERROR("0001", "未知失败"),
ILLEGAL_PARAMETER("0002", "非法参数"),
INDEX_EXCEPTION("0003", "唯一索引冲突"),
UPDATE_ZERO("0004", "更新记录为0"),
HTTP_EXCEPTION("0005", "HTTP接口调用异常"),
RATE_LIMITER("0006", "接口限流"),
RPC_EXCEPTION("0007","rpc接口调用异常"),
E0001("E0001", "不存在对应的折扣计算服务"),
E0002("E0002", "无拼团营销配置"),
E0003("E0003", "拼团活动降级拦截"),
E0004("E0004", "拼团活动切量拦截"),
E0005("E0005", "拼团组队失败,记录更新为0"),
E0006("E0006", "拼团组队完结,锁单量已达成"),
E0007("E0007", "拼团人群限定,不可参与"),
E0008("E0008", "拼团组队失败,缓存库存不足"),
E0101("E0101", "拼团活动未生效"),
E0102("E0102", "不在拼团活动有效时间内"),
E0103("E0103", "当前用户参与此拼团次数已达上限"),
E0104("E0104", "不存在的外部交易单号或用户已退单"),
E0105("E0105", "SC渠道黑名单拦截"),
E0106("E0106", "订单交易时间不在拼团有效时间范围内"),
;
private String code;
private String info;
}
|
2301_82000044/group-buy-market-z
|
group-buy-market-z-types/src/main/java/zack/project/types/enums/ResponseCode.java
|
Java
|
unknown
| 1,431
|
package zack.project.types.event;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
import java.util.Date;
/**
 * @description Base event
* @create 2024-03-30 12:42
*/
@Data
public abstract class BaseEvent<T> {
public abstract EventMessage<T> buildEventMessage(T data);
public abstract String topic();
@Data
@Builder
@AllArgsConstructor
@NoArgsConstructor
public static class EventMessage<T> {
private String id;
private Date timestamp;
private T data;
}
}
|
2301_82000044/group-buy-market-z
|
group-buy-market-z-types/src/main/java/zack/project/types/event/BaseEvent.java
|
Java
|
unknown
| 584
|
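A minimal sketch of a concrete event built on BaseEvent above; the event name, id format, and topic string are hypothetical:

import java.util.Date;
import java.util.UUID;
import zack.project.types.event.BaseEvent;

// Hypothetical concrete event; "topic.team_success" is an assumed topic name
class TeamSuccessMessageEvent extends BaseEvent<String> {
    @Override
    public EventMessage<String> buildEventMessage(String data) {
        return EventMessage.<String>builder()
                .id(UUID.randomUUID().toString().replace("-", ""))
                .timestamp(new Date())
                .data(data)
                .build();
    }

    @Override
    public String topic() {
        return "topic.team_success";
    }
}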
package zack.project.types.exception;
import lombok.Data;
import lombok.EqualsAndHashCode;
import zack.project.types.enums.ResponseCode;
@EqualsAndHashCode(callSuper = true)
@Data
public class AppException extends RuntimeException {
private static final long serialVersionUID = 5317680961212299217L;
    /** Error code */
    private String code;
    /** Error message */
    private String info;
public AppException(String code) {
this.code = code;
}
public AppException(ResponseCode responseCode) {
this.code = responseCode.getCode();
this.info = responseCode.getInfo();
}
public AppException(String code, Throwable cause) {
this.code = code;
super.initCause(cause);
}
public AppException(String code, String message) {
this.code = code;
this.info = message;
}
public AppException(String code, String message, Throwable cause) {
this.code = code;
this.info = message;
super.initCause(cause);
}
@Override
public String toString() {
return "cn.bugstack.types.exception.AppException{" +
"code='" + code + '\'' +
", info='" + info + '\'' +
'}';
}
}
|
2301_82000044/group-buy-market-z
|
group-buy-market-z-types/src/main/java/zack/project/types/exception/AppException.java
|
Java
|
unknown
| 1,250
|
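A short usage sketch pairing AppException with a ResponseCode from the enum above:

import zack.project.types.enums.ResponseCode;
import zack.project.types.exception.AppException;

class AppExceptionDemo {
    public static void main(String[] args) {
        try {
            // Raise a business error with a well-known code
            throw new AppException(ResponseCode.E0005);
        } catch (AppException e) {
            System.out.println(e.getCode() + " " + e.getInfo()); // E0005 plus its message
        }
    }
}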
#!/bin/bash
# COCO 2017 dataset http://cocodataset.org
# Download command: bash data/scripts/get_coco.sh
# Train command: python train.py --data coco.yaml
# Default dataset location is next to YOLOv5:
# /parent_folder
# /coco
# /yolov5
# Download/unzip labels
d='../' # unzip directory
url=https://github.com/ultralytics/yolov5/releases/download/v1.0/
f='coco2017labels.zip' # or 'coco2017labels-segments.zip', 68 MB
echo 'Downloading' $url$f ' ...'
curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background
# Download/unzip images
d='../coco/images' # unzip directory
url=http://images.cocodataset.org/zips/
f1='train2017.zip' # 19G, 118k images
f2='val2017.zip' # 1G, 5k images
f3='test2017.zip' # 7G, 41k images (optional)
for f in $f1 $f2; do
echo 'Downloading' $url$f '...'
curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background
done
wait # finish background tasks
|
2301_81045437/yolov7_plate
|
data/scripts/get_coco.sh
|
Shell
|
unknown
| 962
|
#!/bin/bash
# PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC/
# Download command: bash data/scripts/get_voc.sh
# Train command: python train.py --data voc.yaml
# Default dataset location is next to YOLOv5:
# /parent_folder
# /VOC
# /yolov5
start=$(date +%s)
mkdir -p ../tmp
cd ../tmp/
# Download/unzip images and labels
d='.' # unzip directory
url=https://github.com/ultralytics/yolov5/releases/download/v1.0/
f1=VOCtrainval_06-Nov-2007.zip # 446MB, 5012 images
f2=VOCtest_06-Nov-2007.zip # 438MB, 4953 images
f3=VOCtrainval_11-May-2012.zip # 1.95GB, 17126 images
for f in $f3 $f2 $f1; do
echo 'Downloading' $url$f '...'
curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background
done
wait # finish background tasks
end=$(date +%s)
runtime=$((end - start))
echo "Completed in" $runtime "seconds"
echo "Splitting dataset..."
python3 - "$@" <<END
import os
import xml.etree.ElementTree as ET
from os import getcwd
sets = [('2012', 'train'), ('2012', 'val'), ('2007', 'train'), ('2007', 'val'), ('2007', 'test')]
classes = ["aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog",
"horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]
def convert_box(size, box):
dw = 1. / (size[0])
dh = 1. / (size[1])
x, y, w, h = (box[0] + box[1]) / 2.0 - 1, (box[2] + box[3]) / 2.0 - 1, box[1] - box[0], box[3] - box[2]
return x * dw, y * dh, w * dw, h * dh
def convert_annotation(year, image_id):
in_file = open('VOCdevkit/VOC%s/Annotations/%s.xml' % (year, image_id))
out_file = open('VOCdevkit/VOC%s/labels/%s.txt' % (year, image_id), 'w')
tree = ET.parse(in_file)
root = tree.getroot()
size = root.find('size')
w = int(size.find('width').text)
h = int(size.find('height').text)
for obj in root.iter('object'):
difficult = obj.find('difficult').text
cls = obj.find('name').text
if cls not in classes or int(difficult) == 1:
continue
cls_id = classes.index(cls)
xmlbox = obj.find('bndbox')
b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text), float(xmlbox.find('ymin').text),
float(xmlbox.find('ymax').text))
bb = convert_box((w, h), b)
out_file.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n')
cwd = getcwd()
for year, image_set in sets:
if not os.path.exists('VOCdevkit/VOC%s/labels/' % year):
os.makedirs('VOCdevkit/VOC%s/labels/' % year)
image_ids = open('VOCdevkit/VOC%s/ImageSets/Main/%s.txt' % (year, image_set)).read().strip().split()
list_file = open('%s_%s.txt' % (year, image_set), 'w')
for image_id in image_ids:
list_file.write('%s/VOCdevkit/VOC%s/JPEGImages/%s.jpg\n' % (cwd, year, image_id))
convert_annotation(year, image_id)
list_file.close()
END
cat 2007_train.txt 2007_val.txt 2012_train.txt 2012_val.txt >train.txt
cat 2007_train.txt 2007_val.txt 2007_test.txt 2012_train.txt 2012_val.txt >train.all.txt
mkdir ../VOC ../VOC/images ../VOC/images/train ../VOC/images/val
mkdir ../VOC/labels ../VOC/labels/train ../VOC/labels/val
python3 - "$@" <<END
import os
print(os.path.exists('../tmp/train.txt'))
with open('../tmp/train.txt', 'r') as f:
for line in f.readlines():
line = "/".join(line.split('/')[-5:]).strip()
if os.path.exists("../" + line):
os.system("cp ../" + line + " ../VOC/images/train")
line = line.replace('JPEGImages', 'labels').replace('jpg', 'txt')
if os.path.exists("../" + line):
os.system("cp ../" + line + " ../VOC/labels/train")
print(os.path.exists('../tmp/2007_test.txt'))
with open('../tmp/2007_test.txt', 'r') as f:
for line in f.readlines():
line = "/".join(line.split('/')[-5:]).strip()
if os.path.exists("../" + line):
os.system("cp ../" + line + " ../VOC/images/val")
line = line.replace('JPEGImages', 'labels').replace('jpg', 'txt')
if os.path.exists("../" + line):
os.system("cp ../" + line + " ../VOC/labels/val")
END
rm -rf ../tmp # remove temporary directory
echo "VOC download done."
|
2301_81045437/yolov7_plate
|
data/scripts/get_voc.sh
|
Shell
|
unknown
| 4,240
|
import glob
import os
import numpy as np
# Rewrite WiderFace val label files into the keypoint label format used for training,
# writing the converted copies under widerface/tmp.
txtlist = glob.glob('widerface/val/*.txt')
for txt in txtlist:
    dst = txt.replace('val', 'tmp')
    os.makedirs(os.path.dirname(dst), exist_ok=True)  # ensure the tmp directory exists
    fw = open(dst, 'w')
with open(txt, 'r') as f:
lines = f.readlines()
for line in lines:
data = np.array(line.strip().split(),dtype=np.float32)
print(line)
if len(np.where(data < 0)[0]) == 10:
label = '0 {:.4f} {:.4f} {:.4f} {:.4f} 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000'.format(data[1],data[2],data[3],data[4])
else:
label = '0 {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} 2.0000 {:.4f} {:.4f} 2.0000 {:.4f} {:.4f} 2.0000 {:.4f} {:.4f} 2.0000 {:.4f} {:.4f} 2.0000'.format(data[1],data[2],data[3],data[4],data[5],data[6],data[7],data[8],data[9],data[10],data[11],data[12],data[13],data[14])
fw.write(label + '\n')
fw.close()
|
2301_81045437/yolov7_plate
|
data/test.py
|
Python
|
unknown
| 859
|
import argparse
import time
from pathlib import Path
import os
import copy
import cv2
import torch
import torch.backends.cudnn as cudnn
from numpy import random
from models.experimental import attempt_load
from utils.datasets import LoadStreams, LoadImages
from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, \
scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path, save_one_box
from utils.plots import colors, plot_one_box
from utils.torch_utils import select_device, load_classifier, time_synchronized
def detect(opt):
source, weights, view_img, save_txt, imgsz, save_txt_tidl, kpt_label = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size, opt.save_txt_tidl, opt.kpt_label
save_img = not opt.nosave and not source.endswith('.txt') # save inference images
webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(
('rtsp://', 'rtmp://', 'http://', 'https://'))
# Directories
save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok) # increment run
(save_dir / 'labels' if (save_txt or save_txt_tidl) else save_dir).mkdir(parents=True, exist_ok=True) # make dir
# Initialize
set_logging()
device = select_device(opt.device)
half = device.type != 'cpu' and not save_txt_tidl # half precision only supported on CUDA
# Load model
model = attempt_load(weights, map_location=device) # load FP32 model
stride = int(model.stride.max()) # model stride
if isinstance(imgsz, (list,tuple)):
        assert len(imgsz) == 2, "height and width of image has to be specified"
imgsz[0] = check_img_size(imgsz[0], s=stride)
imgsz[1] = check_img_size(imgsz[1], s=stride)
else:
imgsz = check_img_size(imgsz, s=stride) # check img_size
names = model.module.names if hasattr(model, 'module') else model.names # get class names
if half:
model.half() # to FP16
# Second-stage classifier
classify = False
if classify:
modelc = load_classifier(name='resnet101', n=2) # initialize
        modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model'])  # load_state_dict returns a keys report, not the module
        modelc.to(device).eval()
# Set Dataloader
vid_path, vid_writer = None, None
if webcam:
view_img = check_imshow()
cudnn.benchmark = True # set True to speed up constant image size inference
dataset = LoadStreams(source, img_size=imgsz, stride=stride)
else:
dataset = LoadImages(source, img_size=imgsz, stride=stride)
# Run inference
if device.type != 'cpu':
model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once
t0 = time.time()
for path, img, im0s, vid_cap in dataset:
img = torch.from_numpy(img).to(device)
img = img.half() if half else img.float() # uint8 to fp16/32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
if img.ndimension() == 3:
img = img.unsqueeze(0)
# Inference
t1 = time_synchronized()
pred = model(img, augment=opt.augment)[0]
print(pred[...,4].max())
# Apply NMS
pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms, kpt_label=kpt_label)
t2 = time_synchronized()
# Apply Classifier
if classify:
pred = apply_classifier(pred, modelc, img, im0s)
# Process detections
for i, det in enumerate(pred): # detections per image
if webcam: # batch_size >= 1
p, s, im0, frame = path[i], '%g: ' % i, im0s[i].copy(), dataset.count
else:
p, s, im0, frame = path, '', im0s.copy(), getattr(dataset, 'frame', 0)
p = Path(p) # to Path
save_path = str(save_dir / p.name) # img.jpg
txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt
s += '%gx%g ' % img.shape[2:] # print string
gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
if len(det):
# Rescale boxes from img_size to im0 size
scale_coords(img.shape[2:], det[:, :4], im0.shape, kpt_label=False)
scale_coords(img.shape[2:], det[:, 6:], im0.shape, kpt_label=kpt_label, step=3)
# Print results
for c in det[:, 5].unique():
n = (det[:, 5] == c).sum() # detections per class
s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string
# Write results
for det_index, (*xyxy, conf, cls) in enumerate(reversed(det[:,:6])):
if save_txt: # Write to file
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
line = (cls, *xywh, conf) if opt.save_conf else (cls, *xywh) # label format
with open(txt_path + '.txt', 'a') as f:
f.write(('%g ' * len(line)).rstrip() % line + '\n')
if save_img or opt.save_crop or view_img: # Add bbox to image
c = int(cls) # integer class
print(c)
label = None if opt.hide_labels else (names[c] if opt.hide_conf else f'{names[c]} {conf:.2f}')
kpts = det[det_index, 6:]
plot_one_box(xyxy, im0, label=label, color=colors(c, True), line_thickness=opt.line_thickness, kpt_label=kpt_label, kpts=kpts, steps=3, orig_shape=im0.shape[:2])
if opt.save_crop:
save_one_box(xyxy, im0s, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True)
if save_txt_tidl: # Write to file in tidl dump format
                    for *xyxy, conf, cls in det[:, :6]:  # xyxy, confidence, class
xyxy = torch.tensor(xyxy).view(-1).tolist()
line = (conf, cls, *xyxy) if opt.save_conf else (cls, *xyxy) # label format
with open(txt_path + '.txt', 'a') as f:
f.write(('%g ' * len(line)).rstrip() % line + '\n')
# Print time (inference + NMS)
print(f'{s}Done. ({t2 - t1:.3f}s)')
# Stream results
if view_img:
cv2.imshow(str(p), im0)
cv2.waitKey(1) # 1 millisecond
# Save results (image with detections)
if save_img:
if dataset.mode == 'image':
cv2.imwrite(save_path, im0)
else: # 'video' or 'stream'
if vid_path != save_path: # new video
vid_path = save_path
if isinstance(vid_writer, cv2.VideoWriter):
vid_writer.release() # release previous video writer
if vid_cap: # video
fps = vid_cap.get(cv2.CAP_PROP_FPS)
w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
else: # stream
fps, w, h = 30, im0.shape[1], im0.shape[0]
save_path += '.mp4'
vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
vid_writer.write(im0)
if save_txt or save_txt_tidl or save_img:
s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt or save_txt_tidl else ''
print(f"Results saved to {save_dir}{s}")
print(f'Done. ({time.time() - t0:.3f}s)')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--weights', nargs='+', type=str, default='yolov7-lite-s.pt', help='model.pt path(s)')
parser.add_argument('--source', type=str, default='data/images', help='source') # file/folder, 0 for webcam
# parser.add_argument('--img-size', nargs= '+', type=int, default=640, help='inference size (pixels)')
parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--view-img', action='store_true', help='display results')
parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
parser.add_argument('--save-txt-tidl', action='store_true', help='save results to *.txt in tidl format')
parser.add_argument('--save-bin', action='store_true', help='save base n/w outputs in raw bin format')
parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes')
parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
parser.add_argument('--augment', action='store_true', help='augmented inference')
parser.add_argument('--update', action='store_true', help='update all models')
parser.add_argument('--project', default='runs/detect', help='save results to project/name')
parser.add_argument('--name', default='exp', help='save results to project/name')
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)')
parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels')
parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences')
parser.add_argument('--kpt-label', type=int, default=5, help='number of keypoints')
opt = parser.parse_args()
print(opt)
check_requirements(exclude=('tensorboard', 'pycocotools', 'thop'))
with torch.no_grad():
if opt.update: # update all models (to fix SourceChangeWarning)
for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']:
detect(opt=opt)
strip_optimizer(opt.weights)
else:
detect(opt=opt)
|
2301_81045437/yolov7_plate
|
detect.py
|
Python
|
unknown
| 10,819
|
import argparse
import time
import os
import copy
import cv2
import torch
import numpy as np
import torch.backends.cudnn as cudnn
from models.experimental import attempt_load
from utils.general import non_max_suppression, scale_coords
from plate_recognition.plate_rec import get_plate_result,allFilePath,init_model,cv_imread
from plate_recognition.double_plate_split_merge import get_split_merge
from utils.datasets import letterbox
from utils.cv_puttext import cv2ImgAddText
def cv_imread(path):  # local override of the imported helper; imdecode handles non-ASCII paths
img=cv2.imdecode(np.fromfile(path,dtype=np.uint8),-1)
return img
clors = [(255,0,0),(0,255,0),(0,0,255),(255,255,0),(0,255,255)]
def order_points(pts):  # order keypoints as (top-left, top-right, bottom-right, bottom-left)
rect = np.zeros((4, 2), dtype = "float32")
s = pts.sum(axis = 1)
rect[0] = pts[np.argmin(s)]
rect[2] = pts[np.argmax(s)]
diff = np.diff(pts, axis = 1)
rect[1] = pts[np.argmin(diff)]
rect[3] = pts[np.argmax(diff)]
return rect
def four_point_transform(image, pts):  # perspective transform
# rect = order_points(pts)
rect=pts.astype("float32")
(tl, tr, br, bl) = rect
widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
maxWidth = max(int(widthA), int(widthB))
heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
maxHeight = max(int(heightA), int(heightB))
dst = np.array([
[0, 0],
[maxWidth - 1, 0],
[maxWidth - 1, maxHeight - 1],
[0, maxHeight - 1]], dtype = "float32")
M = cv2.getPerspectiveTransform(rect, dst)
warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))
return warped
def get_plate_rec_landmark(img, xyxy, conf, landmarks, class_num,device,plate_rec_model):
h,w,c = img.shape
result_dict={}
tl = 1 or round(0.002 * (h + w) / 2) + 1 # line/font thickness
x1 = int(xyxy[0])
y1 = int(xyxy[1])
x2 = int(xyxy[2])
y2 = int(xyxy[3])
height=y2-y1
landmarks_np=np.zeros((4,2))
rect=[x1,y1,x2,y2]
for i in range(4):
point_x = int(landmarks[2 * i])
point_y = int(landmarks[2 * i + 1])
landmarks_np[i]=np.array([point_x,point_y])
    class_label = int(class_num)  # plate type: 0 single-layer, 1 double-layer
    roi_img = four_point_transform(img, landmarks_np)  # perspective transform to crop the plate patch
    # cv2.imwrite("roi.jpg",roi_img)
    # roi_img_h = roi_img.shape[0]
    # roi_img_w = roi_img.shape[1]
    # if roi_img_w/roi_img_h<3:
    # class_label=
    # h_w_r = roi_img_w/roi_img_h
    if class_label:  # double-layer plate: split the two rows and merge them side by side
        roi_img = get_split_merge(roi_img)
    plate_number, rec_prob, plate_color, color_conf = get_plate_result(roi_img, device, plate_rec_model)  # recognize the plate patch
result_dict['rect']=rect
result_dict['landmarks']=landmarks_np.tolist()
result_dict['plate_no']=plate_number
    result_dict['rec_conf'] = rec_prob  # per-character confidence
result_dict['plate_color']=plate_color
result_dict['color_conf']=color_conf
result_dict['roi_height']=roi_img.shape[0]
result_dict['score']=conf
result_dict['label']=class_label
return result_dict
def detect_Recognition_plate(model, orgimg, device,plate_rec_model,img_size):
conf_thres = 0.3
iou_thres = 0.5
dict_list=[]
im0 = copy.deepcopy(orgimg)
imgsz=(img_size,img_size)
img = letterbox(im0, new_shape=imgsz)[0]
img = img[:, :, ::-1].transpose(2, 0, 1).copy() # BGR to RGB, to 3x640X640
img = torch.from_numpy(img).to(device)
img = img.float() # uint8 to fp16/32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
if img.ndimension() == 3:
img = img.unsqueeze(0)
pred = model(img)[0]
pred = non_max_suppression(pred, conf_thres=conf_thres, iou_thres=iou_thres, kpt_label=4,agnostic=True)
for i, det in enumerate(pred):
if len(det):
# Rescale boxes from img_size to im0 size
scale_coords(img.shape[2:], det[:, :4], im0.shape, kpt_label=False)
scale_coords(img.shape[2:], det[:, 6:], im0.shape, kpt_label=4, step=3)
for j in range(det.size()[0]):
xyxy = det[j, :4].view(-1).tolist()
conf = det[j, 4].cpu().numpy()
landmarks = det[j, 6:].view(-1).tolist()
landmarks = [landmarks[0],landmarks[1],landmarks[3],landmarks[4],landmarks[6],landmarks[7],landmarks[9],landmarks[10]]
class_num = det[j, 5].cpu().numpy()
result_dict = get_plate_rec_landmark(orgimg, xyxy, conf, landmarks, class_num,device,plate_rec_model)
dict_list.append(result_dict)
return dict_list
def draw_result(orgimg,dict_list):
result_str =""
for result in dict_list:
rect_area = result['rect']
x,y,w,h = rect_area[0],rect_area[1],rect_area[2]-rect_area[0],rect_area[3]-rect_area[1]
padding_w = 0.05*w
padding_h = 0.11*h
rect_area[0]=max(0,int(x-padding_w))
rect_area[1]=max(0,int(y-padding_h))
rect_area[2]=min(orgimg.shape[1],int(rect_area[2]+padding_w))
rect_area[3]=min(orgimg.shape[0],int(rect_area[3]+padding_h))
rect_area = [int(x) for x in rect_area]
height_area = result['roi_height']
landmarks=result['landmarks']
result_p = result['plate_no']+" "+result['plate_color']
result_str+=result_p+" "
        cv2.rectangle(orgimg,(rect_area[0],rect_area[1]),(rect_area[2],rect_area[3]),(0,0,255),2)  # draw the plate box
labelSize = cv2.getTextSize(result_p,cv2.FONT_HERSHEY_SIMPLEX,0.5,1)
        if rect_area[0]+labelSize[0][0]>orgimg.shape[1]:  # keep the label text inside the image
rect_area[0]=int(orgimg.shape[1]-labelSize[0][0])
orgimg=cv2.rectangle(orgimg,(rect_area[0],int(rect_area[1]-round(1.6*labelSize[0][1]))),(int(rect_area[0]+round(1.2*labelSize[0][0])),rect_area[1]+labelSize[1]),(255,255,255),cv2.FILLED)
if len(result)>1:
            for i in range(4):  # keypoints
cv2.circle(orgimg, (int(landmarks[i][0]), int(landmarks[i][1])), 5, clors[i], -1)
orgimg=cv2ImgAddText(orgimg,result_p,rect_area[0],int(rect_area[1]-round(1.6*labelSize[0][1])),(0,0,0),21)
# orgimg=cv2ImgAddText(orgimg,result,rect_area[0]-height_area,rect_area[1]-height_area-10,(0,255,0),height_area)
print(result_str)
return orgimg
if __name__ == '__main__':
parser = argparse.ArgumentParser()
    parser.add_argument('--detect_model', nargs='+', type=str, default='weights/yolov7-lite-s.pt', help='model.pt path(s)')  # detection model
    parser.add_argument('--rec_model', type=str, default='weights/plate_rec_color.pth', help='model.pt path(s)')  # plate recognition + color recognition
parser.add_argument('--source', type=str, default='imgs', help='source') # file/folder, 0 for webcam
# parser.add_argument('--img-size', nargs= '+', type=int, default=640, help='inference size (pixels)')
parser.add_argument('--img_size', type=int, default=640, help='inference size (pixels)')
parser.add_argument('--output', type=str, default='result', help='source')
parser.add_argument('--kpt-label', type=int, default=4, help='number of keypoints')
device =torch.device("cuda" if torch.cuda.is_available() else "cpu")
# device = torch.device("cpu")
opt = parser.parse_args()
print(opt)
model = attempt_load(opt.detect_model, map_location=device)
# torch.save()
plate_rec_model=init_model(device,opt.rec_model)
if not os.path.exists(opt.output):
os.mkdir(opt.output)
file_list=[]
allFilePath(opt.source,file_list)
time_b = time.time()
for pic_ in file_list:
print(pic_,end=" ")
img = cv_imread(pic_)
if img.shape[-1]==4:
img=cv2.cvtColor(img,cv2.COLOR_BGRA2BGR)
# img = my_letter_box(img)
dict_list=detect_Recognition_plate(model, img, device,plate_rec_model,opt.img_size)
ori_img=draw_result(img,dict_list)
img_name = os.path.basename(pic_)
save_img_path = os.path.join(opt.output,img_name)
cv2.imwrite(save_img_path,ori_img)
print(f"elasted time is {time.time()-time_b} s")
|
2301_81045437/yolov7_plate
|
detect_rec_plate.py
|
Python
|
unknown
| 8,417
|
from detect_rec_plate_macao import detect_Recognition_plate,attempt_load,init_model,allFilePath,cv_imread,draw_result,four_point_transform,order_points
import os
import argparse
import torch
import cv2
import time
import shutil
import numpy as np
import json
import re
pattern_str = "([京津沪渝冀豫云辽黑湘皖鲁新苏浙赣鄂桂甘晋蒙陕吉闽贵粤青藏川宁琼]" \
"{1}(([A-HJ-Z]{1}[A-HJ-NP-Z0-9]{5})|([A-HJ-Z]{1}(([DF]{1}[A-HJ-NP-Z0-9]{1}[0-9]{4})|([0-9]{5}[DF]" \
"{1})))|([A-HJ-Z]{1}[A-D0-9]{1}[0-9]{3}警)))|([0-9]{6}使)|((([沪粤川云桂鄂陕蒙藏黑辽渝]{1}A)|鲁B|闽D|蒙E|蒙H)" \
"[0-9]{4}领)|(WJ[京津沪渝冀豫云辽黑湘皖鲁新苏浙赣鄂桂甘晋蒙陕吉闽贵粤青藏川宁琼·•]{1}[0-9]{4}[TDSHBXJ0-9]{1})" \
"|([VKHBSLJNGCE]{1}[A-DJ-PR-TVY]{1}[0-9]{5})"
def is_car_number(pattern, string):
if re.findall(pattern, string):
return True
else:
return False
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--detect_model', nargs='+', type=str, default=r'runs/train/yolov711/weights/best.pt', help='model.pt path(s)')
parser.add_argument('--rec_model', type=str, default=r'weights/plate_rec.pth', help='model.pt path(s)')
parser.add_argument('--source', type=str, default=r'/mnt/Gpan/Mydata/pytorchPorject/datasets/macao_plate/download/', help='source') # file/folder, 0 for webcam
# parser.add_argument('--source', type=str, default=r'test2', help='source')
# parser.add_argument('--img-size', nargs= '+', type=int, default=640, help='inference size (pixels)')
parser.add_argument('--img_size', type=int, default=640, help='inference size (pixels)')
parser.add_argument('--output', type=str, default=r'/mnt/Gpan/Mydata/pytorchPorject/datasets/macao_plate/result/', help='source')
parser.add_argument('--kpt-label', type=int, default=4, help='number of keypoints')
device =torch.device("cuda" if torch.cuda.is_available() else "cpu")
# device = torch.device("cpu")
opt = parser.parse_args()
print(opt)
model = attempt_load(opt.detect_model, map_location=device)
# torch.save()
plate_rec_model=init_model(device,opt.rec_model)
if not os.path.exists(opt.output):
os.mkdir(opt.output)
file_list=[]
index_1=0
index_small=0
# error_path =r"E:\study\plate\data\@shaixuan\train\pic"
allFilePath(opt.source,file_list)
time_b = time.time()
for pic_ in file_list:
try:
image_name = os.path.basename(pic_)
# ori_plate = image_name.split("_")[0]
# ori_plate = image_name.split(".")[0]
flag = 1
index_1+=1
label_dict_str=""
lable_dict={}
# label_dict={}
print(index_small,index_1,pic_)
img = cv_imread(pic_)
if img is None:
continue
if img.shape[-1]==4:
img=cv2.cvtColor(img,cv2.COLOR_BGRA2BGR)
# img = my_letter_box(img)
count=0
dict_list=detect_Recognition_plate(model, img, device,plate_rec_model,opt.img_size)
for result_ in dict_list:
if not result_:
continue
index_small+=1
landmarks=result_['landmarks']
landmarks_np = np.array(landmarks).reshape(-1,2)
img_roi = four_point_transform(img,landmarks_np)
plate_no= result_['plate_no']
# if len(plate_no)<6 or not is_car_number(pattern_str,plate_no):
if len(plate_no)<6:
continue
height = result_['roi_height']
# if height<48:
# continue
pic_name =plate_no+"_"+str(index_small)+".jpg"
label = result_["label"]
roi_save_folder = os.path.join(opt.output,str(label))
if not os.path.exists(roi_save_folder):
os.mkdir(roi_save_folder)
roi_path = os.path.join(roi_save_folder,pic_name)
cv2.imencode('.jpg',img_roi)[1].tofile(roi_path)
        except Exception as e:
            print("error!", pic_, e)  # report which image failed instead of swallowing the exception
|
2301_81045437/yolov7_plate
|
get_small_pic.py
|
Python
|
unknown
| 4,331
|
"""YOLOv5 PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5/
Usage:
import torch
model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
"""
from pathlib import Path
import torch
from models.yolo import Model, attempt_load
from utils.general import check_requirements, set_logging
from utils.google_utils import attempt_download
from utils.torch_utils import select_device
dependencies = ['torch', 'yaml']
check_requirements(Path(__file__).parent / 'requirements.txt', exclude=('tensorboard', 'pycocotools', 'thop'))
def create(name, pretrained, channels, classes, autoshape, verbose):
"""Creates a specified YOLOv5 model
Arguments:
name (str): name of model, i.e. 'yolov5s'
pretrained (bool): load pretrained weights into the model
channels (int): number of input channels
classes (int): number of model classes
autoshape (bool): apply YOLOv5 .autoshape() wrapper to model
verbose (bool): print all information to screen
Returns:
YOLOv5 pytorch model
"""
set_logging(verbose=verbose)
fname = f'{name}.pt' # checkpoint filename
try:
if pretrained and channels == 3 and classes == 80:
model = attempt_load(fname, map_location=torch.device('cpu')) # download/load FP32 model
else:
cfg = list((Path(__file__).parent / 'models').rglob(f'{name}.yaml'))[0] # model.yaml path
model = Model(cfg, channels, classes) # create model
if pretrained:
attempt_download(fname) # download if not found locally
ckpt = torch.load(fname, map_location=torch.device('cpu')) # load
msd = model.state_dict() # model state_dict
csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32
csd = {k: v for k, v in csd.items() if msd[k].shape == v.shape} # filter
model.load_state_dict(csd, strict=False) # load
if len(ckpt['model'].names) == classes:
model.names = ckpt['model'].names # set class names attribute
if autoshape:
model = model.autoshape() # for file/URI/PIL/cv2/np inputs and NMS
device = select_device('0' if torch.cuda.is_available() else 'cpu') # default to GPU if available
return model.to(device)
except Exception as e:
help_url = 'https://github.com/ultralytics/yolov5/issues/36'
s = 'Cache may be out of date, try `force_reload=True`. See %s for help.' % help_url
raise Exception(s) from e
def custom(path_or_model='path/to/model.pt', autoshape=True, verbose=True):
"""YOLOv5-custom model https://github.com/ultralytics/yolov5
Arguments (3 options):
path_or_model (str): 'path/to/model.pt'
path_or_model (dict): torch.load('path/to/model.pt')
path_or_model (nn.Module): torch.load('path/to/model.pt')['model']
Returns:
pytorch model
"""
set_logging(verbose=verbose)
model = torch.load(path_or_model) if isinstance(path_or_model, str) else path_or_model # load checkpoint
if isinstance(model, dict):
model = model['ema' if model.get('ema') else 'model'] # load model
hub_model = Model(model.yaml).to(next(model.parameters()).device) # create
hub_model.load_state_dict(model.float().state_dict()) # load state_dict
hub_model.names = model.names # class names
if autoshape:
hub_model = hub_model.autoshape() # for file/URI/PIL/cv2/np inputs and NMS
device = select_device('0' if torch.cuda.is_available() else 'cpu') # default to GPU if available
return hub_model.to(device)
def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True):
# YOLOv5-small model https://github.com/ultralytics/yolov5
return create('yolov5s', pretrained, channels, classes, autoshape, verbose)
def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True):
# YOLOv5-medium model https://github.com/ultralytics/yolov5
return create('yolov5m', pretrained, channels, classes, autoshape, verbose)
def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True):
# YOLOv5-large model https://github.com/ultralytics/yolov5
return create('yolov5l', pretrained, channels, classes, autoshape, verbose)
def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True):
# YOLOv5-xlarge model https://github.com/ultralytics/yolov5
return create('yolov5x', pretrained, channels, classes, autoshape, verbose)
def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True):
# YOLOv5-small-P6 model https://github.com/ultralytics/yolov5
return create('yolov5s6', pretrained, channels, classes, autoshape, verbose)
def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True):
# YOLOv5-medium-P6 model https://github.com/ultralytics/yolov5
return create('yolov5m6', pretrained, channels, classes, autoshape, verbose)
def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True):
# YOLOv5-large-P6 model https://github.com/ultralytics/yolov5
return create('yolov5l6', pretrained, channels, classes, autoshape, verbose)
def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True):
# YOLOv5-xlarge-P6 model https://github.com/ultralytics/yolov5
return create('yolov5x6', pretrained, channels, classes, autoshape, verbose)
if __name__ == '__main__':
model = create(name='yolov5s', pretrained=True, channels=3, classes=80, autoshape=True, verbose=True) # pretrained
# model = custom(path_or_model='path/to/model.pt') # custom
# Verify inference
import cv2
import numpy as np
from PIL import Image
imgs = ['data/images/zidane.jpg', # filename
'https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg', # URI
cv2.imread('data/images/bus.jpg')[:, :, ::-1], # OpenCV
Image.open('data/images/bus.jpg'), # PIL
np.zeros((320, 640, 3))] # numpy
results = model(imgs) # batched inference
results.print()
results.save()
|
2301_81045437/yolov7_plate
|
hubconf.py
|
Python
|
unknown
| 6,238
|
# init
|
2301_81045437/yolov7_plate
|
models/__init__.py
|
Python
|
unknown
| 6
|
# This file contains modules common to various models
import math
from copy import copy
from pathlib import Path
import numpy as np
import pandas as pd
import requests
import torch
import torch.nn as nn
from PIL import Image
from torch.cuda import amp
import torch.nn.functional as F
from utils.datasets import letterbox
from utils.general import non_max_suppression, non_max_suppression_export, make_divisible, scale_coords, increment_path, xyxy2xywh, save_one_box
from utils.plots import colors, plot_one_box
from utils.torch_utils import time_synchronized
def autopad(k, p=None): # kernel, padding
# Pad to 'same'
if p is None:
p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad
return p
class MP(nn.Module):
def __init__(self, k=2):
super(MP, self).__init__()
self.m = nn.MaxPool2d(kernel_size=k, stride=k)
def forward(self, x):
return self.m(x)
class SP(nn.Module):
def __init__(self, k=3, s=1):
super(SP, self).__init__()
self.m = nn.MaxPool2d(kernel_size=k, stride=s, padding=k // 2)
def forward(self, x):
return self.m(x)
class SPF(nn.Module):
def __init__(self, k=3, s=1):
super(SPF, self).__init__()
self.n = (k - 1) // 2
self.m = nn.Sequential(*[nn.MaxPool2d(kernel_size=3, stride=s, padding=1) for _ in range(self.n)])
def forward(self, x):
return self.m(x)
class ImplicitA(nn.Module):
def __init__(self, channel):
super(ImplicitA, self).__init__()
self.channel = channel
self.implicit = nn.Parameter(torch.zeros(1, channel, 1, 1))
nn.init.normal_(self.implicit, std=.02)
def forward(self, x):
return self.implicit.expand_as(x) + x
class ImplicitM(nn.Module):
def __init__(self, channel):
super(ImplicitM, self).__init__()
self.channel = channel
self.implicit = nn.Parameter(torch.ones(1, channel, 1, 1))
nn.init.normal_(self.implicit, mean=1., std=.02)
def forward(self, x):
return self.implicit.expand_as(x) * x
class ReOrg(nn.Module):
def __init__(self):
super(ReOrg, self).__init__()
def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2)
return torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1)
def DWConv(c1, c2, k=1, s=1, act=True):
# Depthwise convolution
return Conv(c1, c2, k, s, g=math.gcd(c1, c2), act=act)
class Conv(nn.Module):
# Standard convolution
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
super(Conv, self).__init__()
self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)
self.bn = nn.BatchNorm2d(c2)
if act != "ReLU":
self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity())
else:
self.act = nn.ReLU(inplace=True)
def forward(self, x):
return self.act(self.bn(self.conv(x)))
def fuseforward(self, x):
return self.act(self.conv(x))
class TransformerLayer(nn.Module):
# Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance)
def __init__(self, c, num_heads):
super().__init__()
self.q = nn.Linear(c, c, bias=False)
self.k = nn.Linear(c, c, bias=False)
self.v = nn.Linear(c, c, bias=False)
self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads)
self.fc1 = nn.Linear(c, c, bias=False)
self.fc2 = nn.Linear(c, c, bias=False)
def forward(self, x):
x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x
x = self.fc2(self.fc1(x)) + x
return x
class TransformerBlock(nn.Module):
# Vision Transformer https://arxiv.org/abs/2010.11929
def __init__(self, c1, c2, num_heads, num_layers):
super().__init__()
self.conv = None
if c1 != c2:
self.conv = Conv(c1, c2)
self.linear = nn.Linear(c2, c2) # learnable position embedding
self.tr = nn.Sequential(*[TransformerLayer(c2, num_heads) for _ in range(num_layers)])
self.c2 = c2
def forward(self, x):
if self.conv is not None:
x = self.conv(x)
b, _, w, h = x.shape
p = x.flatten(2)
p = p.unsqueeze(0)
p = p.transpose(0, 3)
p = p.squeeze(3)
e = self.linear(p)
x = p + e
x = self.tr(x)
x = x.unsqueeze(3)
x = x.transpose(0, 3)
x = x.reshape(b, self.c2, w, h)
return x
class Bottleneck(nn.Module):
# Standard bottleneck
def __init__(self, c1, c2, shortcut=True, g=1, e=0.5, act=True): # ch_in, ch_out, shortcut, groups, expansion
super(Bottleneck, self).__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, 1, 1, act=act)
self.cv2 = Conv(c_, c2, 3, 1, g=g, act=act)
self.add = shortcut and c1 == c2
def forward(self, x):
return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
class BottleneckCSP(nn.Module):
# CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
super(BottleneckCSP, self).__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
self.cv4 = Conv(2 * c_, c2, 1, 1)
self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3)
self.act = nn.SiLU()
self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
def forward(self, x):
y1 = self.cv3(self.m(self.cv1(x)))
y2 = self.cv2(x)
return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))
class BottleneckCSPF(nn.Module):
# CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
super(BottleneckCSPF, self).__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
#self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
self.cv4 = Conv(2 * c_, c2, 1, 1)
self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3)
self.act = nn.SiLU()
self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
def forward(self, x):
y1 = self.m(self.cv1(x))
y2 = self.cv2(x)
return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))
class BottleneckCSP2(nn.Module):
# CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
super(BottleneckCSP2, self).__init__()
c_ = int(c2) # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = nn.Conv2d(c_, c_, 1, 1, bias=False)
self.cv3 = Conv(2 * c_, c2, 1, 1)
self.bn = nn.BatchNorm2d(2 * c_)
self.act = nn.SiLU()
self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
def forward(self, x):
x1 = self.cv1(x)
y1 = self.m(x1)
y2 = self.cv2(x1)
return self.cv3(self.act(self.bn(torch.cat((y1, y2), dim=1))))
class C3(nn.Module):
# CSP Bottleneck with 3 convolutions
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, act=True): # ch_in, ch_out, number, shortcut, groups, expansion
super(C3, self).__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, 1, 1, act=act)
self.cv2 = Conv(c1, c_, 1, 1, act=act)
self.cv3 = Conv(2 * c_, c2, 1, act=act) # act=FReLU(c2)
self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0, act=act) for _ in range(n)])
# self.m = nn.Sequential(*[CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)])
def forward(self, x):
return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1))
class C3TR(C3):
# C3 module with TransformerBlock()
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
super().__init__(c1, c2, n, shortcut, g, e)
c_ = int(c2 * e)
self.m = TransformerBlock(c_, c_, 4, n)
class SPP(nn.Module):
# Spatial pyramid pooling layer used in YOLOv3-SPP
def __init__(self, c1, c2, k=(3, 3, 3)):
print(k)
super(SPP, self).__init__()
c_ = c1 // 2 # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)
        max_pool_module_list = []
        for pool_kernel in k:
            assert (pool_kernel - 3) % 2 == 0, "required kernel size cannot be implemented with kernel_size of 3"
            num_3x3_maxpool = 1 + (pool_kernel - 3) // 2  # number of stacked 3x3 pools equivalent to one pool_kernel pool
            max_pool_module_list.append(nn.Sequential(*num_3x3_maxpool * [nn.MaxPool2d(kernel_size=3, stride=1, padding=1)]))
#max_pool_module_list[-1] = nn.ModuleList(max_pool_module_list[-1])
self.m = nn.ModuleList(max_pool_module_list)
#self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])
def forward(self, x):
x = self.cv1(x)
return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))
class SPPCSP(nn.Module):
# CSP SPP https://github.com/WongKinYiu/CrossStagePartialNetworks
def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5, k=(5, 9, 13)):
super(SPPCSP, self).__init__()
c_ = int(2 * c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
self.cv3 = Conv(c_, c_, 3, 1)
self.cv4 = Conv(c_, c_, 1, 1)
self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])
self.cv5 = Conv(4 * c_, c_, 1, 1)
self.cv6 = Conv(c_, c_, 3, 1)
self.bn = nn.BatchNorm2d(2 * c_)
self.act = nn.SiLU()
self.cv7 = Conv(2 * c_, c2, 1, 1)
def forward(self, x):
x1 = self.cv4(self.cv3(self.cv1(x)))
y1 = self.cv6(self.cv5(torch.cat([x1] + [m(x1) for m in self.m], 1)))
y2 = self.cv2(x)
return self.cv7(self.act(self.bn(torch.cat((y1, y2), dim=1))))
class SPPCSPC(nn.Module):
# CSP SPP https://github.com/WongKinYiu/CrossStagePartialNetworks
def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5, k=(5, 9, 13)):
super(SPPCSPC, self).__init__()
c_ = int(2 * c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = Conv(c1, c_, 1, 1)
self.cv3 = Conv(c_, c_, 3, 1)
self.cv4 = Conv(c_, c_, 1, 1)
self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])
self.cv5 = Conv(4 * c_, c_, 1, 1)
self.cv6 = Conv(c_, c_, 3, 1)
self.cv7 = Conv(2 * c_, c2, 1, 1)
def forward(self, x):
x1 = self.cv4(self.cv3(self.cv1(x)))
y1 = self.cv6(self.cv5(torch.cat([x1] + [m(x1) for m in self.m], 1)))
y2 = self.cv2(x)
return self.cv7(torch.cat((y1, y2), dim=1))
class SPPFCSPC(nn.Module):
def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5, k=5):
super(SPPFCSPC, self).__init__()
c_ = int(2 * c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = Conv(c1, c_, 1, 1)
self.cv3 = Conv(c_, c_, 3, 1)
self.cv4 = Conv(c_, c_, 1, 1)
self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)
self.cv5 = Conv(4 * c_, c_, 1, 1)
self.cv6 = Conv(c_, c_, 3, 1)
self.cv7 = Conv(2 * c_, c2, 1, 1)
def forward(self, x):
x1 = self.cv4(self.cv3(self.cv1(x)))
x2 = self.m(x1)
x3 = self.m(x2)
y1 = self.cv6(self.cv5(torch.cat((x1,x2,x3, self.m(x3)),1)))
y2 = self.cv2(x)
return self.cv7(torch.cat((y1, y2), dim=1))
class SPPF(nn.Module):
# Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher
def __init__(self, c1, c2, k=5): # equivalent to SPP(k=(5, 9, 13))
super().__init__()
c_ = c1 // 2 # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = Conv(c_ * 4, c2, 1, 1)
self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)
def forward(self, x):
x = self.cv1(x)
y1 = self.m(x)
y2 = self.m(y1)
return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1))
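# --- Illustrative sketch (not part of the upstream file) ---------------------
# Why SPPF(k=5) is "equivalent to SPP(k=(5, 9, 13))": n chained stride-1 5x5
# max pools cover a (4n + 1)-pixel window, so two chained pools match a 9x9
# pool and three match a 13x13 pool. The helper name below is ours.
def _sppf_equivalence_check():
    x = torch.randn(1, 8, 16, 16)
    m5, m9, m13 = nn.MaxPool2d(5, 1, 2), nn.MaxPool2d(9, 1, 4), nn.MaxPool2d(13, 1, 6)
    assert torch.equal(m5(m5(x)), m9(x))       # 5 o 5 == 9
    assert torch.equal(m5(m5(m5(x))), m13(x))  # 5 o 5 o 5 == 13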
class Focus(nn.Module):
# Focus wh information into c-space
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
super(Focus, self).__init__()
self.contract = Contract(gain=2)
self.conv = Conv(c1 * 4, c2, k, s, p, g, act)
def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2)
if hasattr(self, "contract"):
x = self.contract(x)
elif hasattr(self, "conv_slice"):
x = self.conv_slice(x)
else:
x = torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1)
return self.conv(x)
class ConvFocus(nn.Module):
# Focus wh information into c-space
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
super(ConvFocus, self).__init__()
slice_kernel = 3
slice_stride = 2
self.conv_slice = Conv(c1, c1*4, slice_kernel, slice_stride, p, g, act)
self.conv = Conv(c1 * 4, c2, k, s, p, g, act)
def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2)
if hasattr(self, "conv_slice"):
x = self.conv_slice(x)
else:
x = torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1)
x = self.conv(x)
return x
class Contract(nn.Module):
# Contract width-height into channels, i.e. x(1,64,80,80) to x(1,256,40,40)
def __init__(self, gain=2):
super().__init__()
self.gain = gain
def forward(self, x):
        N, C, H, W = x.size()  # assert (H % s == 0) and (W % s == 0), 'Indivisible gain'
s = self.gain
x = x.view(N, C, H // s, s, W // s, s) # x(1,64,40,2,40,2)
x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # x(1,2,2,64,40,40)
return x.view(N, C * s * s, H // s, W // s) # x(1,256,40,40)
class Expand(nn.Module):
# Expand channels into width-height, i.e. x(1,64,80,80) to x(1,16,160,160)
def __init__(self, gain=2):
super().__init__()
self.gain = gain
def forward(self, x):
        N, C, H, W = x.size()  # assert C % s ** 2 == 0, 'Indivisible gain'
s = self.gain
x = x.view(N, s, s, C // s ** 2, H, W) # x(1,2,2,16,80,80)
x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # x(1,16,80,2,80,2)
return x.view(N, C // s ** 2, H * s, W * s) # x(1,16,160,160)
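# --- Illustrative sketch (not part of the upstream file) ---------------------
# Contract packs each s x s spatial patch into channels and Expand is its
# exact inverse, so chaining them with the same gain reproduces the input.
def _contract_expand_roundtrip():
    x = torch.randn(1, 64, 80, 80)
    y = Contract(gain=2)(x)                   # (1, 256, 40, 40)
    assert torch.equal(Expand(gain=2)(y), x)  # lossless round trip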
class Concat(nn.Module):
# Concatenate a list of tensors along dimension
def __init__(self, dimension=1):
super(Concat, self).__init__()
self.d = dimension
def forward(self, x):
return torch.cat(x, self.d)
# yolov7-lite
class StemBlock(nn.Module):
def __init__(self, c1, c2, k=3, s=2, p=None, g=1, act=True):
super(StemBlock, self).__init__()
self.stem_1 = Conv(c1, c2, k, s, p, g, act)
self.stem_2a = Conv(c2, c2 // 2, 1, 1, 0)
self.stem_2b = Conv(c2 // 2, c2, 3, 2, 1)
self.stem_2p = nn.MaxPool2d(kernel_size=2,stride=2,ceil_mode=True)
self.stem_3 = Conv(c2 * 2, c2, 1, 1, 0)
def forward(self, x):
stem_1_out = self.stem_1(x)
stem_2a_out = self.stem_2a(stem_1_out)
stem_2b_out = self.stem_2b(stem_2a_out)
stem_2p_out = self.stem_2p(stem_1_out)
out = self.stem_3(torch.cat((stem_2b_out,stem_2p_out),1))
return out
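# --- Illustrative sketch (not part of the upstream file) ---------------------
# StemBlock downsamples twice: a stride-2 conv, then parallel conv/max-pool
# branches that are concatenated and fused by a 1x1 conv, so a 640x640 input
# comes out at 160x160 with c2 channels.
def _stemblock_shape_check():
    m = StemBlock(3, 32)
    assert m(torch.randn(1, 3, 640, 640)).shape == (1, 32, 160, 160)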
class conv_bn_relu_maxpool(nn.Module):
def __init__(self, c1, c2): # ch_in, ch_out
super(conv_bn_relu_maxpool, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(c1, c2, kernel_size=3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(c2),
            nn.SiLU(inplace=True),  # SiLU despite the module name; the original used ReLU (see common_ori.py)
)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
def forward(self, x):
return self.maxpool(self.conv(x))
class DWConvblock(nn.Module):
"Depthwise conv + Pointwise conv"
def __init__(self, in_channels, out_channels, k, s):
super(DWConvblock, self).__init__()
self.p = k // 2
self.conv1 = nn.Conv2d(in_channels, in_channels, kernel_size=k, stride=s, padding=self.p, groups=in_channels, bias=False)
self.bn1 = nn.BatchNorm2d(in_channels)
self.act1 = nn.SiLU(inplace=True)
self.conv2 = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0, bias=False)
self.bn2 = nn.BatchNorm2d(out_channels)
self.act2 = nn.SiLU(inplace=True)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.act1(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.act2(x)
return x
class ADD(nn.Module):
    # Weighted element-wise shortcut-add of two tensors
def __init__(self, alpha=0.5):
super(ADD, self).__init__()
self.a = alpha
def forward(self, x):
x1, x2 = x[0], x[1]
return torch.add(x1, x2, alpha=self.a)
def channel_shuffle(x, groups):
batchsize, num_channels, height, width = x.data.size()
channels_per_group = num_channels // groups
# reshape
x = x.view(batchsize, groups, channels_per_group, height, width)
x = torch.transpose(x, 1, 2).contiguous()
# flatten
x = x.view(batchsize, -1, height, width)
return x
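# --- Illustrative sketch (not part of the upstream file) ---------------------
# With 4 channels and groups=2 the shuffle reorders channels [0, 1, 2, 3] to
# [0, 2, 1, 3], interleaving the two groups so information crosses the group
# boundary in the next grouped convolution.
def _channel_shuffle_check():
    x = torch.arange(4.).view(1, 4, 1, 1)
    assert channel_shuffle(x, 2).flatten().tolist() == [0., 2., 1., 3.]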
class Shuffle_Block(nn.Module):
def __init__(self, inp, oup, stride):
super(Shuffle_Block, self).__init__()
if not (1 <= stride <= 3):
raise ValueError('illegal stride value')
self.stride = stride
branch_features = oup // 2
assert (self.stride != 1) or (inp == branch_features << 1)
if self.stride > 1:
self.branch1 = nn.Sequential(
self.depthwise_conv(inp, inp, kernel_size=3, stride=self.stride, padding=1),
nn.BatchNorm2d(inp),
nn.Conv2d(inp, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(branch_features),
nn.SiLU(inplace=True),
)
self.branch2 = nn.Sequential(
nn.Conv2d(inp if (self.stride > 1) else branch_features,
branch_features, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(branch_features),
nn.SiLU(inplace=True),
self.depthwise_conv(branch_features, branch_features, kernel_size=3, stride=self.stride, padding=1),
nn.BatchNorm2d(branch_features),
nn.Conv2d(branch_features, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(branch_features),
nn.SiLU(inplace=True),
)
@staticmethod
def depthwise_conv(i, o, kernel_size, stride=1, padding=0, bias=False):
return nn.Conv2d(i, o, kernel_size, stride, padding, bias=bias, groups=i)
def forward(self, x):
if self.stride == 1:
x1, x2 = x.chunk(2, dim=1)
out = torch.cat((x1, self.branch2(x2)), dim=1)
else:
out = torch.cat((self.branch1(x), self.branch2(x)), dim=1)
out = channel_shuffle(out, 2)
return out
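# --- Illustrative sketch (not part of the upstream file) ---------------------
# With stride=1 the input is split channel-wise and only half passes through
# branch2 (so inp must equal oup); with stride=2 both branches downsample and
# their concatenation sets the new channel count.
def _shuffle_block_shape_check():
    x = torch.randn(1, 32, 16, 16)
    assert Shuffle_Block(32, 32, 1)(x).shape == (1, 32, 16, 16)
    assert Shuffle_Block(32, 64, 2)(x).shape == (1, 64, 8, 8)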
# end of yolov7-lite
class NMS(nn.Module):
# Non-Maximum Suppression (NMS) module
iou = 0.45 # IoU threshold
classes = None # (optional list) filter by class
def __init__(self, conf=0.25, kpt_label=False):
super(NMS, self).__init__()
self.conf=conf
self.kpt_label = kpt_label
def forward(self, x):
return non_max_suppression(x[0], conf_thres=self.conf, iou_thres=self.iou, classes=self.classes, kpt_label=self.kpt_label)
class NMS_Export(nn.Module):
# Non-Maximum Suppression (NMS) module used while exporting ONNX model
iou = 0.45 # IoU threshold
classes = None # (optional list) filter by class
def __init__(self, conf=0.001, kpt_label=False):
super(NMS_Export, self).__init__()
self.conf = conf
self.kpt_label = kpt_label
def forward(self, x):
return non_max_suppression_export(x[0], conf_thres=self.conf, iou_thres=self.iou, classes=self.classes, kpt_label=self.kpt_label)
class autoShape(nn.Module):
# input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS
conf = 0.25 # NMS confidence threshold
iou = 0.45 # NMS IoU threshold
classes = None # (optional list) filter by class
def __init__(self, model):
super(autoShape, self).__init__()
self.model = model.eval()
def autoshape(self):
print('autoShape already enabled, skipping... ') # model already converted to model.autoshape()
return self
@torch.no_grad()
def forward(self, imgs, size=640, augment=False, profile=False):
# Inference from various sources. For height=640, width=1280, RGB images example inputs are:
# filename: imgs = 'data/images/zidane.jpg'
# URI: = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg'
# OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3)
# PIL: = Image.open('image.jpg') # HWC x(640,1280,3)
# numpy: = np.zeros((640,1280,3)) # HWC
# torch: = torch.zeros(16,3,320,640) # BCHW (scaled to size=640, 0-1 values)
# multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] # list of images
t = [time_synchronized()]
p = next(self.model.parameters()) # for device and type
if isinstance(imgs, torch.Tensor): # torch
with amp.autocast(enabled=p.device.type != 'cpu'):
return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference
# Pre-process
n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs]) # number of images, list of images
shape0, shape1, files = [], [], [] # image and inference shapes, filenames
for i, im in enumerate(imgs):
f = f'image{i}' # filename
if isinstance(im, str): # filename or uri
im, f = np.asarray(Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im)), im
elif isinstance(im, Image.Image): # PIL Image
im, f = np.asarray(im), getattr(im, 'filename', f) or f
files.append(Path(f).with_suffix('.jpg').name)
if im.shape[0] < 5: # image in CHW
im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1)
im = im[:, :, :3] if im.ndim == 3 else np.tile(im[:, :, None], 3) # enforce 3ch input
            s = im.shape[:2]  # HW (height, width)
shape0.append(s) # image shape
g = (size / max(s)) # gain
shape1.append([y * g for y in s])
imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update
shape1 = [make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)] # inference shape
x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs] # pad
x = np.stack(x, 0) if n > 1 else x[0][None] # stack
x = np.ascontiguousarray(x.transpose((0, 3, 1, 2))) # BHWC to BCHW
x = torch.from_numpy(x).to(p.device).type_as(p) / 255. # uint8 to fp16/32
t.append(time_synchronized())
with amp.autocast(enabled=p.device.type != 'cpu'):
# Inference
y = self.model(x, augment, profile)[0] # forward
t.append(time_synchronized())
# Post-process
y = non_max_suppression(y, conf_thres=self.conf, iou_thres=self.iou, classes=self.classes) # NMS
for i in range(n):
scale_coords(shape1, y[i][:, :4], shape0[i])
t.append(time_synchronized())
return Detections(imgs, y, files, t, self.names, x.shape)
class Detections:
# detections class for YOLOv5 inference results
def __init__(self, imgs, pred, files, times=None, names=None, shape=None):
super(Detections, self).__init__()
d = pred[0].device # device
gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs] # normalizations
self.imgs = imgs # list of images as numpy arrays
self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls)
self.names = names # class names
self.files = files # image filenames
self.xyxy = pred # xyxy pixels
self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels
self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized
self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized
self.n = len(self.pred) # number of images (batch size)
        self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in range(3)) if times is not None else (0., 0., 0.)  # per-image timings (ms)
self.s = shape # inference BCHW shape
def display(self, pprint=False, show=False, save=False, crop=False, render=False, save_dir=Path('')):
for i, (im, pred) in enumerate(zip(self.imgs, self.pred)):
            s = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} '  # avoid shadowing builtin str
if pred is not None:
for c in pred[:, -1].unique():
n = (pred[:, -1] == c).sum() # detections per class
str += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string
if show or save or render or crop:
for *box, conf, cls in pred: # xyxy, confidence, class
label = f'{self.names[int(cls)]} {conf:.2f}'
if crop:
save_one_box(box, im, file=save_dir / 'crops' / self.names[int(cls)] / self.files[i])
else: # all others
plot_one_box(box, im, label=label, color=colors(cls))
im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np
if pprint:
                print(s.rstrip(', '))
if show:
im.show(self.files[i]) # show
if save:
f = self.files[i]
im.save(save_dir / f) # save
print(f"{'Saved' * (i == 0)} {f}", end=',' if i < self.n - 1 else f' to {save_dir}\n')
if render:
self.imgs[i] = np.asarray(im)
def print(self):
self.display(pprint=True) # print results
print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' % self.t)
def show(self):
self.display(show=True) # show results
def save(self, save_dir='runs/hub/exp'):
save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/hub/exp', mkdir=True) # increment save_dir
self.display(save=True, save_dir=save_dir) # save results
def crop(self, save_dir='runs/hub/exp'):
save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/hub/exp', mkdir=True) # increment save_dir
self.display(crop=True, save_dir=save_dir) # crop results
print(f'Saved results to {save_dir}\n')
def render(self):
self.display(render=True) # render results
return self.imgs
def pandas(self):
# return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0])
new = copy(self) # return copy
ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name' # xyxy columns
cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name' # xywh columns
for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]):
a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)] # update
setattr(new, k, [pd.DataFrame(x, columns=c) for x in a])
return new
def tolist(self):
# return a list of Detections objects, i.e. 'for result in results.tolist():'
        x = [Detections([self.imgs[i]], [self.pred[i]], [self.files[i]], names=self.names, shape=self.s) for i in range(self.n)]
        for d in x:
            for k in ['imgs', 'pred', 'files', 'xyxy', 'xyxyn', 'xywh', 'xywhn']:
setattr(d, k, getattr(d, k)[0]) # pop out of list
return x
def __len__(self):
return self.n
class Classify(nn.Module):
# Classification head, i.e. x(b,c1,20,20) to x(b,c2)
def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups
super(Classify, self).__init__()
self.aap = nn.AdaptiveAvgPool2d(1) # to x(b,c1,1,1)
self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g) # to x(b,c2,1,1)
self.flat = nn.Flatten()
def forward(self, x):
z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else [x])], 1) # cat if list
return self.flat(self.conv(z)) # flatten to x(b,c2)
|
2301_81045437/yolov7_plate
|
models/common.py
|
Python
|
unknown
| 29,855
|
# This file contains modules common to various models
import math
from copy import copy
from pathlib import Path
import numpy as np
import pandas as pd
import requests
import torch
import torch.nn as nn
from PIL import Image
from torch.cuda import amp
import torch.nn.functional as F
from utils.datasets import letterbox
from utils.general import non_max_suppression, non_max_suppression_export, make_divisible, scale_coords, increment_path, xyxy2xywh, save_one_box
from utils.plots import colors, plot_one_box
from utils.torch_utils import time_synchronized
def autopad(k, p=None): # kernel, padding
# Pad to 'same'
if p is None:
p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad
return p
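# --- Illustrative sketch (not part of the upstream file) ---------------------
# autopad picks the padding that preserves spatial size at stride 1 ("same"
# padding) for both scalar and per-dimension kernels.
def _autopad_check():
    assert autopad(3) == 1
    assert autopad(5) == 2
    assert autopad((1, 3)) == [0, 1]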
class MP(nn.Module):
def __init__(self, k=2):
super(MP, self).__init__()
self.m = nn.MaxPool2d(kernel_size=k, stride=k)
def forward(self, x):
return self.m(x)
class SP(nn.Module):
def __init__(self, k=3, s=1):
super(SP, self).__init__()
self.m = nn.MaxPool2d(kernel_size=k, stride=s, padding=k // 2)
def forward(self, x):
return self.m(x)
class ImplicitA(nn.Module):
def __init__(self, channel):
super(ImplicitA, self).__init__()
self.channel = channel
self.implicit = nn.Parameter(torch.zeros(1, channel, 1, 1))
nn.init.normal_(self.implicit, std=.02)
def forward(self, x):
return self.implicit.expand_as(x) + x
class ImplicitM(nn.Module):
def __init__(self, channel):
super(ImplicitM, self).__init__()
self.channel = channel
self.implicit = nn.Parameter(torch.ones(1, channel, 1, 1))
nn.init.normal_(self.implicit, mean=1., std=.02)
def forward(self, x):
return self.implicit.expand_as(x) * x
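# --- Illustrative sketch (not part of the upstream file) ---------------------
# ImplicitA adds a learned per-channel bias and ImplicitM applies a learned
# per-channel scale (YOLOR-style "implicit knowledge"). Both are pointwise and
# shape-preserving, so they can be folded into an adjacent 1x1 conv when
# fusing the model for deployment.
def _implicit_shape_check():
    x = torch.randn(2, 16, 8, 8)
    assert ImplicitA(16)(x).shape == x.shape
    assert ImplicitM(16)(x).shape == x.shape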
class ReOrg(nn.Module):
def __init__(self):
super(ReOrg, self).__init__()
def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2)
return torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1)
def DWConv(c1, c2, k=1, s=1, act=True):
# Depthwise convolution
return Conv(c1, c2, k, s, g=math.gcd(c1, c2), act=act)
class Conv(nn.Module):
# Standard convolution
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
super(Conv, self).__init__()
self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)
self.bn = nn.BatchNorm2d(c2)
if act != "ReLU":
self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity())
else:
self.act = nn.ReLU(inplace=True)
def forward(self, x):
return self.act(self.bn(self.conv(x)))
def fuseforward(self, x):
return self.act(self.conv(x))
class TransformerLayer(nn.Module):
# Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance)
def __init__(self, c, num_heads):
super().__init__()
self.q = nn.Linear(c, c, bias=False)
self.k = nn.Linear(c, c, bias=False)
self.v = nn.Linear(c, c, bias=False)
self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads)
self.fc1 = nn.Linear(c, c, bias=False)
self.fc2 = nn.Linear(c, c, bias=False)
def forward(self, x):
x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x
x = self.fc2(self.fc1(x)) + x
return x
class TransformerBlock(nn.Module):
# Vision Transformer https://arxiv.org/abs/2010.11929
def __init__(self, c1, c2, num_heads, num_layers):
super().__init__()
self.conv = None
if c1 != c2:
self.conv = Conv(c1, c2)
self.linear = nn.Linear(c2, c2) # learnable position embedding
self.tr = nn.Sequential(*[TransformerLayer(c2, num_heads) for _ in range(num_layers)])
self.c2 = c2
def forward(self, x):
if self.conv is not None:
x = self.conv(x)
b, _, w, h = x.shape
p = x.flatten(2)
p = p.unsqueeze(0)
p = p.transpose(0, 3)
p = p.squeeze(3)
e = self.linear(p)
x = p + e
x = self.tr(x)
x = x.unsqueeze(3)
x = x.transpose(0, 3)
x = x.reshape(b, self.c2, w, h)
return x
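# --- Illustrative sketch (not part of the upstream file) ---------------------
# The forward above flattens the feature map into an (H*W, B, C) token
# sequence, adds a learned positional embedding, runs the attention layers,
# and folds the tokens back into a (B, C, H, W) map of the same shape.
def _transformer_block_shape_check():
    m = TransformerBlock(16, 16, num_heads=4, num_layers=1)
    assert m(torch.randn(2, 16, 8, 8)).shape == (2, 16, 8, 8)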
class Bottleneck(nn.Module):
# Standard bottleneck
def __init__(self, c1, c2, shortcut=True, g=1, e=0.5, act=True): # ch_in, ch_out, shortcut, groups, expansion
super(Bottleneck, self).__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, 1, 1, act=act)
self.cv2 = Conv(c_, c2, 3, 1, g=g, act=act)
self.add = shortcut and c1 == c2
def forward(self, x):
return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
class BottleneckCSP(nn.Module):
# CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
super(BottleneckCSP, self).__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
self.cv4 = Conv(2 * c_, c2, 1, 1)
self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3)
self.act = nn.SiLU()
self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
def forward(self, x):
y1 = self.cv3(self.m(self.cv1(x)))
y2 = self.cv2(x)
return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))
class BottleneckCSPF(nn.Module):
# CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
super(BottleneckCSPF, self).__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
#self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
self.cv4 = Conv(2 * c_, c2, 1, 1)
self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3)
self.act = nn.SiLU()
self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
def forward(self, x):
y1 = self.m(self.cv1(x))
y2 = self.cv2(x)
return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))
class BottleneckCSP2(nn.Module):
# CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
super(BottleneckCSP2, self).__init__()
c_ = int(c2) # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = nn.Conv2d(c_, c_, 1, 1, bias=False)
self.cv3 = Conv(2 * c_, c2, 1, 1)
self.bn = nn.BatchNorm2d(2 * c_)
self.act = nn.SiLU()
self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
def forward(self, x):
x1 = self.cv1(x)
y1 = self.m(x1)
y2 = self.cv2(x1)
return self.cv3(self.act(self.bn(torch.cat((y1, y2), dim=1))))
class C3(nn.Module):
# CSP Bottleneck with 3 convolutions
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, act=True): # ch_in, ch_out, number, shortcut, groups, expansion
super(C3, self).__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, 1, 1, act=act)
self.cv2 = Conv(c1, c_, 1, 1, act=act)
self.cv3 = Conv(2 * c_, c2, 1, act=act) # act=FReLU(c2)
self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0, act=act) for _ in range(n)])
# self.m = nn.Sequential(*[CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)])
def forward(self, x):
return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1))
class C3TR(C3):
# C3 module with TransformerBlock()
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
super().__init__(c1, c2, n, shortcut, g, e)
c_ = int(c2 * e)
self.m = TransformerBlock(c_, c_, 4, n)
class SPP(nn.Module):
# Spatial pyramid pooling layer used in YOLOv3-SPP
    def __init__(self, c1, c2, k=(3, 3, 3)):
        super(SPP, self).__init__()
        c_ = c1 // 2  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)
        # emulate each pooling kernel with a stack of stride-1 3x3 max pools
        max_pool_module_list = []
        for pool_kernel in k:
            assert (pool_kernel - 3) % 2 == 0, "Required Kernel size cannot be implemented with kernel_size of 3"
            num_3x3_maxpool = 1 + (pool_kernel - 3) // 2
            max_pool_module_list.append(nn.Sequential(*num_3x3_maxpool * [nn.MaxPool2d(kernel_size=3, stride=1, padding=1)]))
#max_pool_module_list[-1] = nn.ModuleList(max_pool_module_list[-1])
self.m = nn.ModuleList(max_pool_module_list)
#self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])
def forward(self, x):
x = self.cv1(x)
return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))
class SPPCSP(nn.Module):
# CSP SPP https://github.com/WongKinYiu/CrossStagePartialNetworks
def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5, k=(5, 9, 13)):
super(SPPCSP, self).__init__()
c_ = int(2 * c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
self.cv3 = Conv(c_, c_, 3, 1)
self.cv4 = Conv(c_, c_, 1, 1)
self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])
self.cv5 = Conv(4 * c_, c_, 1, 1)
self.cv6 = Conv(c_, c_, 3, 1)
self.bn = nn.BatchNorm2d(2 * c_)
self.act = nn.SiLU()
self.cv7 = Conv(2 * c_, c2, 1, 1)
def forward(self, x):
x1 = self.cv4(self.cv3(self.cv1(x)))
y1 = self.cv6(self.cv5(torch.cat([x1] + [m(x1) for m in self.m], 1)))
y2 = self.cv2(x)
return self.cv7(self.act(self.bn(torch.cat((y1, y2), dim=1))))
class SPPCSPC(nn.Module):
# CSP SPP https://github.com/WongKinYiu/CrossStagePartialNetworks
def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5, k=(5, 9, 13)):
super(SPPCSPC, self).__init__()
c_ = int(2 * c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = Conv(c1, c_, 1, 1)
self.cv3 = Conv(c_, c_, 3, 1)
self.cv4 = Conv(c_, c_, 1, 1)
self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])
self.cv5 = Conv(4 * c_, c_, 1, 1)
self.cv6 = Conv(c_, c_, 3, 1)
self.cv7 = Conv(2 * c_, c2, 1, 1)
def forward(self, x):
x1 = self.cv4(self.cv3(self.cv1(x)))
y1 = self.cv6(self.cv5(torch.cat([x1] + [m(x1) for m in self.m], 1)))
y2 = self.cv2(x)
return self.cv7(torch.cat((y1, y2), dim=1))
class SPPF(nn.Module):
# Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher
def __init__(self, c1, c2, k=5): # equivalent to SPP(k=(5, 9, 13))
super().__init__()
c_ = c1 // 2 # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = Conv(c_ * 4, c2, 1, 1)
self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)
def forward(self, x):
x = self.cv1(x)
y1 = self.m(x)
y2 = self.m(y1)
return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1))
class Focus(nn.Module):
# Focus wh information into c-space
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
super(Focus, self).__init__()
self.contract = Contract(gain=2)
self.conv = Conv(c1 * 4, c2, k, s, p, g, act)
def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2)
if hasattr(self, "contract"):
x = self.contract(x)
elif hasattr(self, "conv_slice"):
x = self.conv_slice(x)
else:
x = torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1)
return self.conv(x)
class ConvFocus(nn.Module):
# Focus wh information into c-space
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
super(ConvFocus, self).__init__()
slice_kernel = 3
slice_stride = 2
self.conv_slice = Conv(c1, c1*4, slice_kernel, slice_stride, p, g, act)
self.conv = Conv(c1 * 4, c2, k, s, p, g, act)
def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2)
if hasattr(self, "conv_slice"):
x = self.conv_slice(x)
else:
x = torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1)
x = self.conv(x)
return x
class Contract(nn.Module):
# Contract width-height into channels, i.e. x(1,64,80,80) to x(1,256,40,40)
def __init__(self, gain=2):
super().__init__()
self.gain = gain
def forward(self, x):
        N, C, H, W = x.size()  # assert (H % s == 0) and (W % s == 0), 'Indivisible gain'
s = self.gain
x = x.view(N, C, H // s, s, W // s, s) # x(1,64,40,2,40,2)
x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # x(1,2,2,64,40,40)
return x.view(N, C * s * s, H // s, W // s) # x(1,256,40,40)
class Expand(nn.Module):
# Expand channels into width-height, i.e. x(1,64,80,80) to x(1,16,160,160)
def __init__(self, gain=2):
super().__init__()
self.gain = gain
def forward(self, x):
        N, C, H, W = x.size()  # assert C % s ** 2 == 0, 'Indivisible gain'
s = self.gain
x = x.view(N, s, s, C // s ** 2, H, W) # x(1,2,2,16,80,80)
x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # x(1,16,80,2,80,2)
return x.view(N, C // s ** 2, H * s, W * s) # x(1,16,160,160)
class Concat(nn.Module):
# Concatenate a list of tensors along dimension
def __init__(self, dimension=1):
super(Concat, self).__init__()
self.d = dimension
def forward(self, x):
return torch.cat(x, self.d)
# yolov7-lite
class conv_bn_relu_maxpool(nn.Module):
def __init__(self, c1, c2): # ch_in, ch_out
super(conv_bn_relu_maxpool, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(c1, c2, kernel_size=3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(c2),
nn.ReLU(inplace=True),
)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
def forward(self, x):
return self.maxpool(self.conv(x))
class DWConvblock(nn.Module):
"Depthwise conv + Pointwise conv"
def __init__(self, in_channels, out_channels, k, s):
super(DWConvblock, self).__init__()
self.p = k // 2
self.conv1 = nn.Conv2d(in_channels, in_channels, kernel_size=k, stride=s, padding=self.p, groups=in_channels,
bias=False)
self.bn1 = nn.BatchNorm2d(in_channels)
self.conv2 = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0, bias=False)
self.bn2 = nn.BatchNorm2d(out_channels)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = F.relu(x)
return x
class ADD(nn.Module):
    # Weighted element-wise shortcut-add of two tensors
def __init__(self, alpha=0.5):
super(ADD, self).__init__()
self.a = alpha
def forward(self, x):
x1, x2 = x[0], x[1]
return torch.add(x1, x2, alpha=self.a)
def channel_shuffle(x, groups):
batchsize, num_channels, height, width = x.data.size()
channels_per_group = num_channels // groups
# reshape
x = x.view(batchsize, groups,
channels_per_group, height, width)
x = torch.transpose(x, 1, 2).contiguous()
# flatten
x = x.view(batchsize, -1, height, width)
return x
class Shuffle_Block(nn.Module):
def __init__(self, inp, oup, stride):
super(Shuffle_Block, self).__init__()
if not (1 <= stride <= 3):
raise ValueError('illegal stride value')
self.stride = stride
branch_features = oup // 2
assert (self.stride != 1) or (inp == branch_features << 1)
if self.stride > 1:
self.branch1 = nn.Sequential(
self.depthwise_conv(inp, inp, kernel_size=3, stride=self.stride, padding=1),
nn.BatchNorm2d(inp),
nn.Conv2d(inp, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(branch_features),
nn.ReLU(inplace=True),
)
self.branch2 = nn.Sequential(
nn.Conv2d(inp if (self.stride > 1) else branch_features,
branch_features, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(branch_features),
nn.ReLU(inplace=True),
self.depthwise_conv(branch_features, branch_features, kernel_size=3, stride=self.stride, padding=1),
nn.BatchNorm2d(branch_features),
nn.Conv2d(branch_features, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(branch_features),
nn.ReLU(inplace=True),
)
@staticmethod
def depthwise_conv(i, o, kernel_size, stride=1, padding=0, bias=False):
return nn.Conv2d(i, o, kernel_size, stride, padding, bias=bias, groups=i)
def forward(self, x):
if self.stride == 1:
x1, x2 = x.chunk(2, dim=1)
out = torch.cat((x1, self.branch2(x2)), dim=1)
else:
out = torch.cat((self.branch1(x), self.branch2(x)), dim=1)
out = channel_shuffle(out, 2)
return out
# end of yolov7-lite
class NMS(nn.Module):
# Non-Maximum Suppression (NMS) module
iou = 0.45 # IoU threshold
classes = None # (optional list) filter by class
def __init__(self, conf=0.25, kpt_label=False):
super(NMS, self).__init__()
self.conf=conf
self.kpt_label = kpt_label
def forward(self, x):
return non_max_suppression(x[0], conf_thres=self.conf, iou_thres=self.iou, classes=self.classes, kpt_label=self.kpt_label)
class NMS_Export(nn.Module):
# Non-Maximum Suppression (NMS) module used while exporting ONNX model
iou = 0.45 # IoU threshold
classes = None # (optional list) filter by class
def __init__(self, conf=0.001, kpt_label=False):
super(NMS_Export, self).__init__()
self.conf = conf
self.kpt_label = kpt_label
def forward(self, x):
return non_max_suppression_export(x[0], conf_thres=self.conf, iou_thres=self.iou, classes=self.classes, kpt_label=self.kpt_label)
class autoShape(nn.Module):
# input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS
conf = 0.25 # NMS confidence threshold
iou = 0.45 # NMS IoU threshold
classes = None # (optional list) filter by class
def __init__(self, model):
super(autoShape, self).__init__()
self.model = model.eval()
def autoshape(self):
print('autoShape already enabled, skipping... ') # model already converted to model.autoshape()
return self
@torch.no_grad()
def forward(self, imgs, size=640, augment=False, profile=False):
# Inference from various sources. For height=640, width=1280, RGB images example inputs are:
# filename: imgs = 'data/images/zidane.jpg'
# URI: = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg'
# OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3)
# PIL: = Image.open('image.jpg') # HWC x(640,1280,3)
# numpy: = np.zeros((640,1280,3)) # HWC
# torch: = torch.zeros(16,3,320,640) # BCHW (scaled to size=640, 0-1 values)
# multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] # list of images
t = [time_synchronized()]
p = next(self.model.parameters()) # for device and type
if isinstance(imgs, torch.Tensor): # torch
with amp.autocast(enabled=p.device.type != 'cpu'):
return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference
# Pre-process
n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs]) # number of images, list of images
shape0, shape1, files = [], [], [] # image and inference shapes, filenames
for i, im in enumerate(imgs):
f = f'image{i}' # filename
if isinstance(im, str): # filename or uri
im, f = np.asarray(Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im)), im
elif isinstance(im, Image.Image): # PIL Image
im, f = np.asarray(im), getattr(im, 'filename', f) or f
files.append(Path(f).with_suffix('.jpg').name)
if im.shape[0] < 5: # image in CHW
im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1)
im = im[:, :, :3] if im.ndim == 3 else np.tile(im[:, :, None], 3) # enforce 3ch input
            s = im.shape[:2]  # HW (height, width)
shape0.append(s) # image shape
g = (size / max(s)) # gain
shape1.append([y * g for y in s])
imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update
shape1 = [make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)] # inference shape
x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs] # pad
x = np.stack(x, 0) if n > 1 else x[0][None] # stack
x = np.ascontiguousarray(x.transpose((0, 3, 1, 2))) # BHWC to BCHW
x = torch.from_numpy(x).to(p.device).type_as(p) / 255. # uint8 to fp16/32
t.append(time_synchronized())
with amp.autocast(enabled=p.device.type != 'cpu'):
# Inference
y = self.model(x, augment, profile)[0] # forward
t.append(time_synchronized())
# Post-process
y = non_max_suppression(y, conf_thres=self.conf, iou_thres=self.iou, classes=self.classes) # NMS
for i in range(n):
scale_coords(shape1, y[i][:, :4], shape0[i])
t.append(time_synchronized())
return Detections(imgs, y, files, t, self.names, x.shape)
class Detections:
# detections class for YOLOv5 inference results
def __init__(self, imgs, pred, files, times=None, names=None, shape=None):
super(Detections, self).__init__()
d = pred[0].device # device
gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs] # normalizations
self.imgs = imgs # list of images as numpy arrays
self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls)
self.names = names # class names
self.files = files # image filenames
self.xyxy = pred # xyxy pixels
self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels
self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized
self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized
self.n = len(self.pred) # number of images (batch size)
        self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in range(3)) if times is not None else (0., 0., 0.)  # per-image timings (ms)
self.s = shape # inference BCHW shape
def display(self, pprint=False, show=False, save=False, crop=False, render=False, save_dir=Path('')):
for i, (im, pred) in enumerate(zip(self.imgs, self.pred)):
            s = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} '  # avoid shadowing builtin str
if pred is not None:
for c in pred[:, -1].unique():
n = (pred[:, -1] == c).sum() # detections per class
str += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string
if show or save or render or crop:
for *box, conf, cls in pred: # xyxy, confidence, class
label = f'{self.names[int(cls)]} {conf:.2f}'
if crop:
save_one_box(box, im, file=save_dir / 'crops' / self.names[int(cls)] / self.files[i])
else: # all others
plot_one_box(box, im, label=label, color=colors(cls))
im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np
if pprint:
                print(s.rstrip(', '))
if show:
im.show(self.files[i]) # show
if save:
f = self.files[i]
im.save(save_dir / f) # save
print(f"{'Saved' * (i == 0)} {f}", end=',' if i < self.n - 1 else f' to {save_dir}\n')
if render:
self.imgs[i] = np.asarray(im)
def print(self):
self.display(pprint=True) # print results
print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' % self.t)
def show(self):
self.display(show=True) # show results
def save(self, save_dir='runs/hub/exp'):
save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/hub/exp', mkdir=True) # increment save_dir
self.display(save=True, save_dir=save_dir) # save results
def crop(self, save_dir='runs/hub/exp'):
save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/hub/exp', mkdir=True) # increment save_dir
self.display(crop=True, save_dir=save_dir) # crop results
print(f'Saved results to {save_dir}\n')
def render(self):
self.display(render=True) # render results
return self.imgs
def pandas(self):
# return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0])
new = copy(self) # return copy
ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name' # xyxy columns
cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name' # xywh columns
for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]):
a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)] # update
setattr(new, k, [pd.DataFrame(x, columns=c) for x in a])
return new
def tolist(self):
# return a list of Detections objects, i.e. 'for result in results.tolist():'
        x = [Detections([self.imgs[i]], [self.pred[i]], [self.files[i]], names=self.names, shape=self.s) for i in range(self.n)]
        for d in x:
            for k in ['imgs', 'pred', 'files', 'xyxy', 'xyxyn', 'xywh', 'xywhn']:
setattr(d, k, getattr(d, k)[0]) # pop out of list
return x
def __len__(self):
return self.n
class Classify(nn.Module):
# Classification head, i.e. x(b,c1,20,20) to x(b,c2)
def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups
super(Classify, self).__init__()
self.aap = nn.AdaptiveAvgPool2d(1) # to x(b,c1,1,1)
self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g) # to x(b,c2,1,1)
self.flat = nn.Flatten()
def forward(self, x):
z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else [x])], 1) # cat if list
return self.flat(self.conv(z)) # flatten to x(b,c2)
|
2301_81045437/yolov7_plate
|
models/common_ori.py
|
Python
|
unknown
| 28,032
|
# This file contains experimental modules
import numpy as np
import torch
import torch.nn as nn
from models.common import Conv, DWConv
from utils.google_utils import attempt_download
class CrossConv(nn.Module):
# Cross Convolution Downsample
def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):
# ch_in, ch_out, kernel, stride, groups, expansion, shortcut
super(CrossConv, self).__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, (1, k), (1, s))
self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g)
self.add = shortcut and c1 == c2
def forward(self, x):
return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
class Sum(nn.Module):
# Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070
def __init__(self, n, weight=False): # n: number of inputs
super(Sum, self).__init__()
self.weight = weight # apply weights boolean
self.iter = range(n - 1) # iter object
if weight:
self.w = nn.Parameter(-torch.arange(1., n) / 2, requires_grad=True) # layer weights
def forward(self, x):
y = x[0] # no weight
if self.weight:
w = torch.sigmoid(self.w) * 2
for i in self.iter:
y = y + x[i + 1] * w[i]
else:
for i in self.iter:
y = y + x[i + 1]
return y
class GhostConv(nn.Module):
# Ghost Convolution https://github.com/huawei-noah/ghostnet
def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups
super(GhostConv, self).__init__()
c_ = c2 // 2 # hidden channels
self.cv1 = Conv(c1, c_, k, s, None, g, act)
self.cv2 = Conv(c_, c_, 5, 1, None, c_, act)
def forward(self, x):
y = self.cv1(x)
return torch.cat([y, self.cv2(y)], 1)
class GhostBottleneck(nn.Module):
# Ghost Bottleneck https://github.com/huawei-noah/ghostnet
def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride
super(GhostBottleneck, self).__init__()
c_ = c2 // 2
self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1), # pw
DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw
GhostConv(c_, c2, 1, 1, act=False)) # pw-linear
self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False),
Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity()
def forward(self, x):
return self.conv(x) + self.shortcut(x)
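# --- Illustrative sketch (not part of the upstream file) ---------------------
# GhostConv produces half its outputs with a regular conv and the other half
# with a cheap depthwise 5x5 on top of them; GhostBottleneck stacks pw/dw/pw
# ghost convs with a matching shortcut, halving resolution when s=2.
def _ghost_shape_check():
    x = torch.randn(1, 8, 32, 32)
    assert GhostConv(8, 16)(x).shape == (1, 16, 32, 32)
    assert GhostBottleneck(8, 16, k=3, s=2)(x).shape == (1, 16, 16, 16)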
class MixConv2d(nn.Module):
# Mixed Depthwise Conv https://arxiv.org/abs/1907.09595
def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True):
super(MixConv2d, self).__init__()
groups = len(k)
if equal_ch: # equal c_ per group
i = torch.linspace(0, groups - 1E-6, c2).floor() # c2 indices
c_ = [(i == g).sum() for g in range(groups)] # intermediate channels
else: # equal weight.numel() per group
b = [c2] + [0] * groups
a = np.eye(groups + 1, groups, k=-1)
a -= np.roll(a, 1, axis=1)
a *= np.array(k) ** 2
a[0] = 1
c_ = np.linalg.lstsq(a, b, rcond=None)[0].round() # solve for equal weight indices, ax = b
self.m = nn.ModuleList([nn.Conv2d(c1, int(c_[g]), k[g], s, k[g] // 2, bias=False) for g in range(groups)])
self.bn = nn.BatchNorm2d(c2)
self.act = nn.ReLU(inplace=True)
def forward(self, x):
return x + self.act(self.bn(torch.cat([m(x) for m in self.m], 1)))
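# --- Illustrative sketch (not part of the upstream file) ---------------------
# With equal_ch=True, c2=8 and k=(1, 3), the output channels split 4/4 between
# the 1x1 and 3x3 branches. Note the residual add in forward means c1 must
# equal c2 (and s must be 1) for the shapes to line up.
def _mixconv_shape_check():
    m = MixConv2d(8, 8, k=(1, 3), s=1)
    assert m(torch.randn(1, 8, 16, 16)).shape == (1, 8, 16, 16)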
class Ensemble(nn.ModuleList):
# Ensemble of models
def __init__(self):
super(Ensemble, self).__init__()
def forward(self, x, augment=False):
y = []
for module in self:
y.append(module(x, augment)[0])
# y = torch.stack(y).max(0)[0] # max ensemble
# y = torch.stack(y).mean(0) # mean ensemble
y = torch.cat(y, 1) # nms ensemble
return y, None # inference, train output
def attempt_load(weights, map_location=None, inplace=True):
from models.yolo import Detect, Model
# Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
model = Ensemble()
for w in weights if isinstance(weights, list) else [weights]:
attempt_download(w)
ckpt = torch.load(w, map_location=map_location) # load
model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()) # FP32 model
# Compatibility updates
for m in model.modules():
if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model]:
m.inplace = inplace # pytorch 1.7.0 compatibility
elif type(m) is Conv:
m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility
if len(model) == 1:
return model[-1] # return model
else:
print('Ensemble created with %s\n' % weights)
for k in ['names', 'stride']:
setattr(model, k, getattr(model[-1], k))
return model # return ensemble
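# --- Illustrative usage sketch (not part of the upstream file) ---------------
# The checkpoint paths below are placeholders. A single path returns one fused
# FP32 model; a list returns an Ensemble whose outputs are concatenated before
# NMS.
# model = attempt_load('yolov7-plate.pt', map_location='cpu')
# ensemble = attempt_load(['a.pt', 'b.pt'], map_location='cpu')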
|
2301_81045437/yolov7_plate
|
models/experimental.py
|
Python
|
unknown
| 5,211
|
"""Exports a YOLOv5 *.pt model to ONNX and TorchScript formats
Usage:
$ export PYTHONPATH="$PWD" && python models/export.py --weights yolov5s.pt --img 640 --batch 1
"""
import argparse
import sys
import time
from pathlib import Path
sys.path.append(Path(__file__).parent.parent.absolute().__str__()) # to run '$ python *.py' files in subdirectories
import torch
import torch.nn as nn
from torch.utils.mobile_optimizer import optimize_for_mobile
import cv2
import numpy as np
import models
from models.experimental import attempt_load
from utils.activations import Hardswish, SiLU
from utils.general import colorstr, check_img_size, check_requirements, file_size, set_logging
from utils.torch_utils import select_device
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--weights', type=str, default='./yolov5s.pt', help='weights path')
parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image size') # height, width
parser.add_argument('--batch-size', type=int, default=1, help='batch size')
parser.add_argument('--grid', action='store_true', help='export Detect() layer grid')
parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--dynamic', action='store_true', help='dynamic ONNX axes') # ONNX-only
parser.add_argument('--simplify', action='store_true', help='simplify ONNX model') # ONNX-only
    parser.add_argument('--export-nms', action='store_true', help='export the nms part in ONNX model')  # ONNX-only; opt.grid has to be set True for nms export to work
opt = parser.parse_args()
opt.img_size *= 2 if len(opt.img_size) == 1 else 1 # expand
print(opt)
set_logging()
t = time.time()
# Load PyTorch model
device = select_device(opt.device)
model = attempt_load(opt.weights, map_location=device) # load FP32 model
labels = model.names
# Checks
gs = int(max(model.stride)) # grid size (max stride)
opt.img_size = [check_img_size(x, gs) for x in opt.img_size] # verify img_size are gs-multiples
# Input
img = torch.zeros(opt.batch_size, 3, *opt.img_size).to(device) # image size(1,3,320,192) iDetection
# img = cv2.imread("/user/a0132471/Files/bit-bucket/pytorch/jacinto-ai-pytest/data/results/datasets/pytorch_coco_mmdet_img_resize640_val2017_5k_yolov5/images/val2017/000000000139.png")
# img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
# img = np.ascontiguousarray(img)
# img = torch.tensor(img[None,:,:,:], dtype = torch.float32)
# img /= 255
# Update model
for k, m in model.named_modules():
m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility
if isinstance(m, models.common.Conv): # assign export-friendly activations
if isinstance(m.act, nn.Hardswish):
m.act = Hardswish()
elif isinstance(m.act, nn.SiLU):
m.act = SiLU()
# elif isinstance(m, models.yolo.Detect):
# m.forward = m.forward_export # assign forward (optional)
model.model[-1].export = not (opt.grid or opt.export_nms) # set Detect() layer grid export
model.model[-1].export_cat = True #onnx export
for _ in range(2):
y = model(img) # dry runs
output_names = ["output"]
if opt.export_nms:
nms = models.common.NMS(conf=0.01, kpt_label=4)
nms_export = models.common.NMS_Export(conf=0.01, kpt_label=4)
y_export = nms_export(y)
y = nms(y)
#assert (torch.sum(torch.abs(y_export[0]-y[0]))<1e-6)
model_nms = torch.nn.Sequential(model, nms_export)
model_nms.eval()
output_names = ['detections']
print(f"\n{colorstr('PyTorch:')} starting from {opt.weights} ({file_size(opt.weights):.1f} MB)")
# TorchScript export -----------------------------------------------------------------------------------------------
# prefix = colorstr('TorchScript:')
# try:
# print(f'\n{prefix} starting export with torch {torch.__version__}...')
# f = opt.weights.replace('.pt', '.torchscript.pt') # filename
# ts = torch.jit.trace(model, img, strict=False)
# ts = optimize_for_mobile(ts) # https://pytorch.org/tutorials/recipes/script_optimized.html
# ts.save(f)
# print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
# except Exception as e:
# print(f'{prefix} export failure: {e}')
# ONNX export ------------------------------------------------------------------------------------------------------
prefix = colorstr('ONNX:')
try:
import onnx
print(f'{prefix} starting export with onnx {onnx.__version__}...')
f = opt.weights.replace('.pt', '.onnx') # filename
if opt.export_nms:
torch.onnx.export(model_nms, img, f, verbose=False, opset_version=11, input_names=['images'], output_names=output_names,
dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'}, # size(1,3,640,640)
'output': {0: 'batch', 2: 'y', 3: 'x'}} if opt.dynamic else None)
else:
torch.onnx.export(model, img, f, verbose=False, opset_version=11, input_names=['images'], output_names=output_names,
dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'}, # size(1,3,640,640)
'output': {0: 'batch', 2: 'y', 3: 'x'}} if opt.dynamic else None)
# Checks
model_onnx = onnx.load(f) # load onnx model
onnx.checker.check_model(model_onnx) # check onnx model
# print(onnx.helper.printable_graph(model_onnx.graph)) # print
# Simplify
if opt.simplify:
try:
check_requirements(['onnx-simplifier'])
import onnxsim
print(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...')
model_onnx, check = onnxsim.simplify(model_onnx,
dynamic_input_shape=opt.dynamic,
input_shapes={'images': list(img.shape)} if opt.dynamic else None)
                assert check, 'onnx-simplifier check failed'
onnx.save(model_onnx, f)
except Exception as e:
print(f'{prefix} simplifier failure: {e}')
print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
except Exception as e:
print(f'{prefix} export failure: {e}')
# CoreML export ----------------------------------------------------------------------------------------------------
prefix = colorstr('CoreML:')
try:
import coremltools as ct
print(f'{prefix} starting export with coremltools {ct.__version__}...')
        # convert model from torchscript and apply pixel scaling as per detect.py
        # NOTE: 'ts' is created by the TorchScript export block above; with that block
        # commented out this conversion raises NameError and is caught by the except below
        model = ct.convert(ts, inputs=[ct.ImageType(name='image', shape=img.shape, scale=1 / 255.0, bias=[0, 0, 0])])
f = opt.weights.replace('.pt', '.mlmodel') # filename
model.save(f)
print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
except Exception as e:
print(f'{prefix} export failure: {e}')
# Finish
print(f'\nExport complete ({time.time() - t:.2f}s). Visualize with https://github.com/lutzroeder/netron.')
|
2301_81045437/yolov7_plate
|
models/export.py
|
Python
|
unknown
| 7,492
|
# YOLOv5 YOLO-specific modules
import argparse
import logging
import sys
from copy import deepcopy
from pathlib import Path
sys.path.append(Path(__file__).parent.parent.absolute().__str__()) # to run '$ python *.py' files in subdirectories
logger = logging.getLogger(__name__)
from models.common import *
from models.experimental import *
from utils.autoanchor import check_anchor_order
from utils.general import make_divisible, check_file, set_logging
from utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \
select_device, copy_attr
try:
import thop # for FLOPS computation
except ImportError:
thop = None
class Detect(nn.Module):
stride = None # strides computed during build
export = False # onnx export
def __init__(self, nc=80, anchors=(), nkpt=None, ch=(), inplace=True, dw_conv_kpt=False): # detection layer
super(Detect, self).__init__()
self.nc = nc # number of classes
self.nkpt = nkpt
self.dw_conv_kpt = dw_conv_kpt
self.no_det=(nc + 5) # number of outputs per anchor for box and class
self.no_kpt = 3*self.nkpt ## number of outputs per anchor for keypoints
self.no = self.no_det+self.no_kpt
self.nl = len(anchors) # number of detection layers
self.na = len(anchors[0]) // 2 # number of anchors
self.grid = [torch.zeros(1)] * self.nl # init grid
self.flip_test = False
a = torch.tensor(anchors).float().view(self.nl, -1, 2)
self.register_buffer('anchors', a) # shape(nl,na,2)
self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2)
self.m = nn.ModuleList(nn.Conv2d(x, self.no_det * self.na, 1) for x in ch) # output conv
if self.nkpt is not None:
if self.dw_conv_kpt: #keypoint head is slightly more complex
self.m_kpt = nn.ModuleList(
nn.Sequential(DWConv(x, x, k=3), Conv(x,x),
DWConv(x, x, k=3), Conv(x, x),
DWConv(x, x, k=3), Conv(x,x),
DWConv(x, x, k=3), Conv(x, x),
DWConv(x, x, k=3), Conv(x, x),
DWConv(x, x, k=3), nn.Conv2d(x, self.no_kpt * self.na, 1)) for x in ch)
else: #keypoint head is a single convolution
self.m_kpt = nn.ModuleList(nn.Conv2d(x, self.no_kpt * self.na, 1) for x in ch)
self.inplace = inplace # use in-place ops (e.g. slice assignment)
def forward(self, x):
# x = x.copy() # for profiling
z = [] # inference output
self.training |= self.export
for i in range(self.nl):
if self.nkpt is None or self.nkpt==0:
x[i] = self.m[i](x[i])
else :
x[i] = torch.cat((self.m[i](x[i]), self.m_kpt[i](x[i])), axis=1)
bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85)
x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
x_det = x[i][..., :6]
x_kpt = x[i][..., 6:]
if not self.training: # inference
if self.grid[i].shape[2:4] != x[i].shape[2:4]:
self.grid[i] = self._make_grid(nx, ny).to(x[i].device)
kpt_grid_x = self.grid[i][..., 0:1]
kpt_grid_y = self.grid[i][..., 1:2]
if self.nkpt == 0:
y = x[i].sigmoid()
else:
y = x_det.sigmoid()
if self.inplace:
xy = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy
wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i].view(1, self.na, 1, 1, 2) # wh
if self.nkpt != 0:
x_kpt[..., 0::3] = (x_kpt[..., ::3] * 2. - 0.5 + kpt_grid_x.repeat(1,1,1,1,self.nkpt)) * self.stride[i] # xy
x_kpt[..., 1::3] = (x_kpt[..., 1::3] * 2. - 0.5 + kpt_grid_y.repeat(1,1,1,1,self.nkpt)) * self.stride[i] # xy
#x_kpt[..., 0::3] = ((x_kpt[..., 0::3].tanh() * 2.) ** 3 * self.anchor_grid[i][:,0].repeat(self.nkpt,1).permute(1,0).view(1, self.na, 1, 1, self.nkpt)) + kpt_grid_x.repeat(1,1,1,1,17) * self.stride[i] # xy
#x_kpt[..., 1::3] = ((x_kpt[..., 1::3].tanh() * 2.) ** 3 * self.anchor_grid[i][:,0].repeat(self.nkpt,1).permute(1,0).view(1, self.na, 1, 1, self.nkpt)) + kpt_grid_y.repeat(1,1,1,1,17) * self.stride[i] # xy
x_kpt[..., 2::3] = x_kpt[..., 2::3].sigmoid()
y = torch.cat((xy, wh, y[..., 4:], x_kpt), dim = -1)
else: # for YOLOv5 on AWS Inferentia https://github.com/ultralytics/yolov5/pull/2953
xy = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy
wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh
if self.nkpt != 0:
y[..., 6:] = (y[..., 6:] * 2. - 0.5 + self.grid[i].repeat((1,1,1,1,self.nkpt))) * self.stride[i] # xy
y = torch.cat((xy, wh, y[..., 4:]), -1)
z.append(y.view(bs, -1, self.no))
return x if self.training else (torch.cat(z, 1), x)
@staticmethod
def _make_grid(nx=20, ny=20):
yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])
return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()
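# --- Illustrative sketch (not part of the upstream file) ---------------------
# Worked example of the box decoding used in Detect.forward: a raw sigmoid
# output of 0.5 decodes to the centre of grid cell (3, 4) at stride 8, and
# wh = (0.5 * 2) ** 2 * anchor reproduces the anchor size exactly.
def _detect_decode_example():
    y, grid, stride = 0.5, torch.tensor([3., 4.]), 8.
    anchor = torch.tensor([10., 13.])
    xy = (y * 2. - 0.5 + grid) * stride  # tensor([28., 36.])
    wh = ((y * 2.) ** 2) * anchor        # tensor([10., 13.])
    assert torch.equal(xy, torch.tensor([28., 36.]))
    assert torch.equal(wh, anchor)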
class IDetect(nn.Module):
stride = None # strides computed during build
export = False # onnx export
def __init__(self, nc=80, anchors=(), nkpt=None, ch=(), inplace=True, dw_conv_kpt=False): # detection layer
super(IDetect, self).__init__()
self.nc = nc # number of classes
self.nkpt = nkpt
self.dw_conv_kpt = dw_conv_kpt
self.no_det=(nc + 5) # number of outputs per anchor for box and class
self.no_kpt = 3*self.nkpt ## number of outputs per anchor for keypoints
self.no = self.no_det+self.no_kpt
self.nl = len(anchors) # number of detection layers
self.na = len(anchors[0]) // 2 # number of anchors
self.grid = [torch.zeros(1)] * self.nl # init grid
self.flip_test = False
a = torch.tensor(anchors).float().view(self.nl, -1, 2)
self.register_buffer('anchors', a) # shape(nl,na,2)
self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2)
self.m = nn.ModuleList(nn.Conv2d(x, self.no_det * self.na, 1) for x in ch) # output conv
self.ia = nn.ModuleList(ImplicitA(x) for x in ch)
self.im = nn.ModuleList(ImplicitM(self.no_det * self.na) for _ in ch)
if self.nkpt is not None:
if self.dw_conv_kpt: #keypoint head is slightly more complex
self.m_kpt = nn.ModuleList(
nn.Sequential(DWConv(x, x, k=3), Conv(x,x),
DWConv(x, x, k=3), Conv(x, x),
DWConv(x, x, k=3), Conv(x,x),
DWConv(x, x, k=3), Conv(x, x),
DWConv(x, x, k=3), Conv(x, x),
DWConv(x, x, k=3), nn.Conv2d(x, self.no_kpt * self.na, 1)) for x in ch)
else: #keypoint head is a single convolution
self.m_kpt = nn.ModuleList(nn.Conv2d(x, self.no_kpt * self.na, 1) for x in ch)
self.inplace = inplace # use in-place ops (e.g. slice assignment)
def forward(self, x):
# x = x.copy() # for profiling
z = [] # inference output
self.training |= self.export
for i in range(self.nl):
if self.nkpt is None or self.nkpt==0:
x[i] = self.im[i](self.m[i](self.ia[i](x[i]))) # conv
else :
x[i] = torch.cat((self.im[i](self.m[i](self.ia[i](x[i]))), self.m_kpt[i](x[i])), axis=1)
bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85)
x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
x_det = x[i][..., :6]
x_kpt = x[i][..., 6:]
if not self.training: # inference
if self.grid[i].shape[2:4] != x[i].shape[2:4]:
self.grid[i] = self._make_grid(nx, ny).to(x[i].device)
kpt_grid_x = self.grid[i][..., 0:1]
kpt_grid_y = self.grid[i][..., 1:2]
if self.nkpt == 0:
y = x[i].sigmoid()
else:
y = x_det.sigmoid()
if self.inplace:
xy = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy
wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i].view(1, self.na, 1, 1, 2) # wh
if self.nkpt != 0:
x_kpt[..., 0::3] = (x_kpt[..., ::3] * 2. - 0.5 + kpt_grid_x.repeat(1,1,1,1,self.nkpt)) * self.stride[i] # xy
x_kpt[..., 1::3] = (x_kpt[..., 1::3] * 2. - 0.5 + kpt_grid_y.repeat(1,1,1,1,self.nkpt)) * self.stride[i] # xy
#x_kpt[..., 0::3] = (x_kpt[..., ::3] + kpt_grid_x.repeat(1,1,1,1,17)) * self.stride[i] # xy
#x_kpt[..., 1::3] = (x_kpt[..., 1::3] + kpt_grid_y.repeat(1,1,1,1,17)) * self.stride[i] # xy
#print('=============')
#print(self.anchor_grid[i].shape)
#print(self.anchor_grid[i][...,0].unsqueeze(4).shape)
#print(x_kpt[..., 0::3].shape)
#x_kpt[..., 0::3] = ((x_kpt[..., 0::3].tanh() * 2.) ** 3 * self.anchor_grid[i][...,0].unsqueeze(4).repeat(1,1,1,1,self.nkpt)) + kpt_grid_x.repeat(1,1,1,1,17) * self.stride[i] # xy
#x_kpt[..., 1::3] = ((x_kpt[..., 1::3].tanh() * 2.) ** 3 * self.anchor_grid[i][...,1].unsqueeze(4).repeat(1,1,1,1,self.nkpt)) + kpt_grid_y.repeat(1,1,1,1,17) * self.stride[i] # xy
#x_kpt[..., 0::3] = (((x_kpt[..., 0::3].sigmoid() * 4.) ** 2 - 8.) * self.anchor_grid[i][...,0].unsqueeze(4).repeat(1,1,1,1,self.nkpt)) + kpt_grid_x.repeat(1,1,1,1,17) * self.stride[i] # xy
#x_kpt[..., 1::3] = (((x_kpt[..., 1::3].sigmoid() * 4.) ** 2 - 8.) * self.anchor_grid[i][...,1].unsqueeze(4).repeat(1,1,1,1,self.nkpt)) + kpt_grid_y.repeat(1,1,1,1,17) * self.stride[i] # xy
x_kpt[..., 2::3] = x_kpt[..., 2::3].sigmoid()
y = torch.cat((xy, wh, y[..., 4:], x_kpt), dim = -1)
else: # for YOLOv5 on AWS Inferentia https://github.com/ultralytics/yolov5/pull/2953
xy = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy
wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh
if self.nkpt != 0:
y[..., 6:] = (y[..., 6:] * 2. - 0.5 + self.grid[i].repeat((1,1,1,1,self.nkpt))) * self.stride[i] # xy
y = torch.cat((xy, wh, y[..., 4:]), -1)
z.append(y.view(bs, -1, self.no))
return x if self.training else (torch.cat(z, 1), x)
@staticmethod
def _make_grid(nx=20, ny=20):
yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])
return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()
class IKeypoint(nn.Module):
stride = None # strides computed during build
export = False # onnx export
export_cat = False # onnx export cat output
def __init__(self, nc=80, anchors=(), nkpt=5, ch=(), inplace=True, dw_conv_kpt=False): # detection layer
super(IKeypoint, self).__init__()
self.nc = nc # number of classes
self.nkpt = nkpt
self.dw_conv_kpt = dw_conv_kpt
self.no_det=(nc + 5) # number of outputs per anchor for box and class
self.no_kpt = 3*self.nkpt ## number of outputs per anchor for keypoints
self.no = self.no_det+self.no_kpt
self.nl = len(anchors) # number of detection layers
self.na = len(anchors[0]) // 2 # number of anchors
self.grid = [torch.zeros(1)] * self.nl # init grid
self.flip_test = False
a = torch.tensor(anchors).float().view(self.nl, -1, 2)
self.register_buffer('anchors', a) # shape(nl,na,2)
self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2)
self.m = nn.ModuleList(nn.Conv2d(x, self.no_det * self.na, 1) for x in ch) # output conv
self.ia = nn.ModuleList(ImplicitA(x) for x in ch)
self.im = nn.ModuleList(ImplicitM(self.no_det * self.na) for _ in ch)
if self.nkpt is not None:
if self.dw_conv_kpt: #keypoint head is slightly more complex
self.m_kpt = nn.ModuleList(
nn.Sequential(DWConv(x, x, k=3), Conv(x,x),
DWConv(x, x, k=3), Conv(x, x),
DWConv(x, x, k=3), Conv(x,x),
DWConv(x, x, k=3), Conv(x, x),
DWConv(x, x, k=3), Conv(x, x),
DWConv(x, x, k=3), nn.Conv2d(x, self.no_kpt * self.na, 1)) for x in ch)
else: #keypoint head is a single convolution
self.m_kpt = nn.ModuleList(nn.Conv2d(x, self.no_kpt * self.na, 1) for x in ch)
self.inplace = inplace # use in-place ops (e.g. slice assignment)
def forward(self, x):
# x = x.copy() # for profiling
z = [] # inference output
self.training |= self.export
if self.export_cat:
for i in range(self.nl):
x[i] = torch.cat((self.im[i](self.m[i](self.ia[i](x[i]))), self.m_kpt[i](x[i])), axis=1)
bs, _, ny, nx = map(int,x[i].shape) # x(bs,255,20,20) to x(bs,3,20,20,85)
bs=-1
x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
x_det = x[i][..., :5+self.nc]
x_kpt = x[i][..., 5+self.nc:]
if self.grid[i].shape[2:4] != x[i].shape[2:4]:
self.grid[i] = self._make_grid(nx, ny).to(x[i].device)
kpt_grid_x = self.grid[i][:,:,:,:, 0:1]
kpt_grid_y = self.grid[i][:,:,:,:, 1:2]
y = x_det.sigmoid()
xy = (y[:,:,:,:, 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy
wh = (y[:,:,:,:, 2:4] * 2) ** 2 * self.anchor_grid[i].view(1, self.na, 1, 1, 2) # wh
                classify = y[..., 4:]  # objectness + class scores
# x_kpt[:,:,:,:, 0::3] = (x_kpt[:,:,:,:, ::3] * 2. - 0.5 + kpt_grid_x.repeat(1,1,1,1,self.nkpt)) * self.stride[i] # xy
# x_kpt[:,:,:,:,1::3] = (x_kpt[:,:,:,:, 1::3] * 2. - 0.5 + kpt_grid_y.repeat(1,1,1,1,self.nkpt)) * self.stride[i] # xy
# x_kpt[:,:,:,:,2::3] = x_kpt[:,:,:,:, 2::3].sigmoid()
x1=(x_kpt[:,:,:,:, 0:1] * 2. - 0.5 + kpt_grid_x) * self.stride[i]
y1=(x_kpt[:,:,:,:, 1:2] * 2. - 0.5 + kpt_grid_y) * self.stride[i]
s1=x_kpt[:,:,:,:, 2:3].sigmoid()
landmarks1=torch.cat((x1,y1,s1),-1)
x2=(x_kpt[:,:,:,:, 3:4] * 2. - 0.5 + kpt_grid_x) * self.stride[i]
y2=(x_kpt[:,:,:,:, 4:5] * 2. - 0.5 + kpt_grid_y) * self.stride[i]
s2=x_kpt[:,:,:,:, 5:6].sigmoid()
landmarks2=torch.cat((x2,y2,s2),-1)
x3=(x_kpt[:,:,:,:, 6:7] * 2. - 0.5 + kpt_grid_x) * self.stride[i]
y3=(x_kpt[:,:,:,:, 7:8] * 2. - 0.5 + kpt_grid_y) * self.stride[i]
s3=x_kpt[:,:,:,:, 8:9].sigmoid()
landmarks3=torch.cat((x3,y3,s3),-1)
x4=(x_kpt[:,:,:,:, 9:10] * 2. - 0.5 + kpt_grid_x) * self.stride[i]
y4=(x_kpt[:,:,:,:, 10:11] * 2. - 0.5 + kpt_grid_y) * self.stride[i]
s4=x_kpt[:,:,:,:, 11:12].sigmoid()
landmarks4=torch.cat((x4,y4,s4),-1)
                y = torch.cat((xy, wh, classify, landmarks1, landmarks2, landmarks3, landmarks4), dim=-1)
z.append(y.view(bs, self.na*nx*ny, self.no))
return torch.cat(z,1)
for i in range(self.nl):
if self.nkpt is None or self.nkpt==0:
x[i] = self.im[i](self.m[i](self.ia[i](x[i]))) # conv
else :
x[i] = torch.cat((self.im[i](self.m[i](self.ia[i](x[i]))), self.m_kpt[i](x[i])), axis=1)
bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85)
x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
x_det = x[i][..., :5+self.nc]
x_kpt = x[i][..., 5+self.nc:]
if not self.training: # inference
if self.grid[i].shape[2:4] != x[i].shape[2:4]:
self.grid[i] = self._make_grid(nx, ny).to(x[i].device)
kpt_grid_x = self.grid[i][..., 0:1]
kpt_grid_y = self.grid[i][..., 1:2]
if self.nkpt == 0:
y = x[i].sigmoid()
else:
y = x_det.sigmoid()
if self.inplace:
xy = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy
wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i].view(1, self.na, 1, 1, 2) # wh
if self.nkpt != 0:
x_kpt[..., 0::3] = (x_kpt[..., ::3] * 2. - 0.5 + kpt_grid_x.repeat(1,1,1,1,self.nkpt)) * self.stride[i] # xy
x_kpt[..., 1::3] = (x_kpt[..., 1::3] * 2. - 0.5 + kpt_grid_y.repeat(1,1,1,1,self.nkpt)) * self.stride[i] # xy
x_kpt[..., 2::3] = x_kpt[..., 2::3].sigmoid()
y = torch.cat((xy, wh, y[..., 4:], x_kpt), dim = -1)
else: # for YOLOv5 on AWS Inferentia https://github.com/ultralytics/yolov5/pull/2953
xy = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy
wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh
if self.nkpt != 0:
y[..., 5+self.nc:] = (y[..., 5+self.nc:] * 2. - 0.5 + self.grid[i].repeat((1,1,1,1,self.nkpt))) * self.stride[i] # xy
y = torch.cat((xy, wh, y[..., 4:]), -1)
z.append(y.view(bs, -1, self.no))
return x if self.training else (torch.cat(z, 1), x)
@staticmethod
def _make_grid(nx=20, ny=20):
yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])
return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()
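# --- illustrative sketch (added; not part of the upstream model code) ---
# Shows how _make_grid() pairs with the inference decode used above:
# xy = (sigmoid(t) * 2. - 0.5 + grid) * stride, so a raw sigmoid output of
# 0.5 lands exactly on its own cell center. Only torch is assumed; the
# names below are hypothetical.
def _decode_xy_sketch():
    grid = IKeypoint._make_grid(nx=2, ny=2)  # shape (1, 1, 2, 2, 2)
    t = torch.full((1, 1, 2, 2, 2), 0.5)     # pretend sigmoid outputs
    stride = 8.0
    xy = (t * 2. - 0.5 + grid) * stride      # cell centers scaled by stride
    return xy  # the top-left cell decodes to (4., 4.) at stride 8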
class Model(nn.Module):
def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, input channels, number of classes
super(Model, self).__init__()
if isinstance(cfg, dict):
self.yaml = cfg # model dict
else: # is *.yaml
import yaml # for torch hub
self.yaml_file = Path(cfg).name
with open(cfg) as f:
self.yaml = yaml.safe_load(f) # model dict
# Define model
ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels
if nc and nc != self.yaml['nc']:
logger.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}")
self.yaml['nc'] = nc # override yaml value
if anchors:
logger.info(f'Overriding model.yaml anchors with anchors={anchors}')
self.yaml['anchors'] = round(anchors) # override yaml value
self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist
self.names = [str(i) for i in range(self.yaml['nc'])] # default names
self.inplace = self.yaml.get('inplace', True)
# logger.info([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))])
# Build strides, anchors
m = self.model[-1] # Detect()
if isinstance(m, Detect) or isinstance(m, IDetect) or isinstance(m, IKeypoint):
s = 256 # 2x min stride
m.inplace = self.inplace
m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward
m.anchors /= m.stride.view(-1, 1, 1)
check_anchor_order(m)
self.stride = m.stride
self._initialize_biases() # only run once
# logger.info('Strides: %s' % m.stride.tolist())
# Init weights, biases
initialize_weights(self)
self.info()
logger.info('')
def forward(self, x, augment=False, profile=False):
if augment:
return self.forward_augment(x) # augmented inference, None
else:
return self.forward_once(x, profile) # single-scale inference, train
def forward_augment(self, x):
img_size = x.shape[-2:] # height, width
s = [1, 0.83, 0.67] # scales
f = [None, 3, None] # flips (2-ud, 3-lr)
y = [] # outputs
for si, fi in zip(s, f):
xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max()))
yi = self.forward_once(xi)[0] # forward
# cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save
yi = self._descale_pred(yi, fi, si, img_size)
y.append(yi)
return torch.cat(y, 1), None # augmented inference, train
def forward_once(self, x, profile=False):
y, dt = [], [] # outputs
for m in self.model:
if m.f != -1: # if not from previous layer
x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers
if isinstance(m, nn.Upsample):
m.recompute_scale_factor = False
if profile:
o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPS
t = time_synchronized()
for _ in range(10):
_ = m(x)
dt.append((time_synchronized() - t) * 100)
if m == self.model[0]:
logger.info(f"{'time (ms)':>10s} {'GFLOPS':>10s} {'params':>10s} {'module'}")
logger.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}')
x = m(x) # run
y.append(x if m.i in self.save else None) # save output
if profile:
logger.info('%.1fms total' % sum(dt))
return x
def _descale_pred(self, p, flips, scale, img_size):
# de-scale predictions following augmented inference (inverse operation)
if self.inplace:
p[..., :4] /= scale # de-scale
if flips == 2:
p[..., 1] = img_size[0] - p[..., 1] # de-flip ud
elif flips == 3:
p[..., 0] = img_size[1] - p[..., 0] # de-flip lr
else:
x, y, wh = p[..., 0:1] / scale, p[..., 1:2] / scale, p[..., 2:4] / scale # de-scale
if flips == 2:
y = img_size[0] - y # de-flip ud
elif flips == 3:
x = img_size[1] - x # de-flip lr
p = torch.cat((x, y, wh, p[..., 4:]), -1)
return p
def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency
# https://arxiv.org/abs/1708.02002 section 3.3
# cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.
m = self.model[-1] # Detect() module
for mi, s in zip(m.m, m.stride): # from
b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85)
b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image)
b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls
mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
def _print_biases(self):
m = self.model[-1] # Detect() module
for mi in m.m: # from
b = mi.bias.detach().view(m.na, -1).T # conv.bias(255) to (3,85)
logger.info(
('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean()))
# def _print_weights(self):
# for m in self.model.modules():
# if type(m) is Bottleneck:
# logger.info('%10.3g' % (m.w.detach().sigmoid() * 2)) # shortcut weights
def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers
logger.info('Fusing layers... ')
for m in self.model.modules():
if type(m) is Conv and hasattr(m, 'bn'):
m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv
delattr(m, 'bn') # remove batchnorm
m.forward = m.fuseforward # update forward
self.info()
return self
def nms(self, mode=True): # add or remove NMS module
present = type(self.model[-1]) is NMS # last layer is NMS
if mode and not present:
logger.info('Adding NMS... ')
m = NMS() # module
m.f = -1 # from
m.i = self.model[-1].i + 1 # index
self.model.add_module(name='%s' % m.i, module=m) # add
self.eval()
elif not mode and present:
logger.info('Removing NMS... ')
self.model = self.model[:-1] # remove
return self
def autoshape(self): # add autoShape module
logger.info('Adding autoShape... ')
m = autoShape(self) # wrap model
copy_attr(m, self, include=('yaml', 'nc', 'hyp', 'names', 'stride'), exclude=()) # copy attributes
return m
def info(self, verbose=False, img_size=640): # print model information
model_info(self, verbose, img_size)
def parse_model(d, ch): # model_dict, input_channels(3)
logger.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments'))
anchors, nc, nkpt, gd, gw = d['anchors'], d['nc'], d['nkpt'], d['depth_multiple'], d['width_multiple']
na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors
    no = na * (nc + 5 + 2*nkpt)  # number of outputs = anchors * (classes + 5 + 2*keypoints)
layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out
for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args
args_dict = {}
m = eval(m) if isinstance(m, str) else m # eval strings
for j, a in enumerate(args):
try:
args[j] = eval(a) if isinstance(a, str) else a # eval strings
except:
pass
n = max(round(n * gd), 1) if n > 1 else n # depth gain
if m in [Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, DWConv, MixConv2d, Focus, ConvFocus, CrossConv, BottleneckCSP,
C3, C3TR, BottleneckCSPF, BottleneckCSP2, SPPCSP, SPPCSPC, SPPF, conv_bn_relu_maxpool, Shuffle_Block, DWConvblock,StemBlock]:
c1, c2 = ch[f], args[0]
if c2 != no: # if not output
c2 = make_divisible(c2 * gw, 8)
args = [c1, c2, *args[1:]]
if m in [BottleneckCSP, C3, C3TR, BottleneckCSPF, BottleneckCSP2, SPPCSP, SPPCSPC]:
args.insert(2, n) # number of repeats
n = 1
if m in [Conv, GhostConv, Bottleneck, GhostBottleneck, DWConv, MixConv2d, Focus, ConvFocus, CrossConv, BottleneckCSP, C3, C3TR]:
if 'act' in d.keys():
args_dict = {"act" : d['act']}
elif m is nn.BatchNorm2d:
args = [ch[f]]
elif m is Concat:
c2 = sum([ch[x] for x in f])
elif m is ADD:
c2 = sum([ch[x] for x in f])//2
elif m in [Detect, IDetect, IKeypoint]:
args.append([ch[x] for x in f])
if isinstance(args[1], int): # number of anchors
args[1] = [list(range(args[1] * 2))] * len(f)
if 'dw_conv_kpt' in d.keys():
args_dict = {"dw_conv_kpt" : d['dw_conv_kpt']}
elif m is ReOrg:
c2 = ch[f] * 4
elif m is Contract:
c2 = ch[f] * args[0] ** 2
elif m is Expand:
c2 = ch[f] // args[0] ** 2
else:
c2 = ch[f]
m_ = nn.Sequential(*[m(*args, **args_dict) for _ in range(n)]) if n > 1 else m(*args, **args_dict) # module
t = str(m)[8:-2].replace('__main__.', '') # module type
np = sum([x.numel() for x in m_.parameters()]) # number params
m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params
logger.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args)) # print
save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist
layers.append(m_)
if i == 0:
ch = []
ch.append(c2)
return nn.Sequential(*layers), sorted(save)
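# --- illustrative sketch (added): how depth_multiple (gd) and width_multiple
# (gw) scale a layer spec inside parse_model above. The gd/gw values are
# assumptions picked for illustration; make_divisible is the same helper
# parse_model already uses.
def _scale_sketch():
    gd, gw = 0.33, 0.50
    n_scaled = max(round(3 * gd), 1)        # 3 stacked repeats -> 1
    c2_scaled = make_divisible(64 * gw, 8)  # 64 channels -> 32
    return n_scaled, c2_scaled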
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
opt = parser.parse_args()
opt.cfg = check_file(opt.cfg) # check file
set_logging()
device = select_device(opt.device)
# Create model
model = Model(opt.cfg).to(device)
model.train()
# Profile
# img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 320, 320).to(device)
# y = model(img, profile=True)
# Tensorboard (not working https://github.com/ultralytics/yolov5/issues/2898)
# from torch.utils.tensorboard import SummaryWriter
# tb_writer = SummaryWriter('.')
# logger.info("Run 'tensorboard --logdir=models' to view tensorboard at http://localhost:6006/")
# tb_writer.add_graph(torch.jit.trace(model, img, strict=False), []) # add model graph
# tb_writer.add_image('test', img[0], dataformats='CWH') # add model to tensorboard
|
2301_81045437/yolov7_plate
|
models/yolo.py
|
Python
|
unknown
| 30,465
|
"""Exports a YOLOv5 *.pt model to ONNX and TorchScript formats
Usage:
$ export PYTHONPATH="$PWD" && python models/export.py --weights yolov5s.pt --img 640 --batch 1
"""
import argparse
import sys
import time
from pathlib import Path
sys.path.append(Path(__file__).parent.parent.absolute().__str__()) # to run '$ python *.py' files in subdirectories
import onnx
import torch
import torch.nn as nn
from torch.utils.mobile_optimizer import optimize_for_mobile
import cv2
import numpy as np
import models
from models.experimental import attempt_load
from utils.activations import Hardswish, SiLU
from utils.general import colorstr, check_img_size, check_requirements, file_size, set_logging
from utils.torch_utils import select_device
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--weights', type=str, default='runs/train/yolov714/weights/best.pt', help='weights path')
parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image size') # height, width
parser.add_argument('--batch-size', type=int, default=1, help='batch size')
parser.add_argument('--grid', action='store_true', help='export Detect() layer grid')
parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
opt = parser.parse_args()
opt.img_size *= 2 if len(opt.img_size) == 1 else 1 # expand
print(opt)
set_logging()
t = time.time()
# Load PyTorch model
device = select_device(opt.device)
model = attempt_load(opt.weights, map_location=device) # load FP32 model
labels = model.names
# Checks
gs = int(max(model.stride)) # grid size (max stride)
opt.img_size = [check_img_size(x, gs) for x in opt.img_size] # verify img_size are gs-multiples
# Input
img = torch.zeros(opt.batch_size, 3, *opt.img_size).to(device) # image size(1,3,320,192) iDetection
# Update model
for k, m in model.named_modules():
m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility
if isinstance(m, models.common.Conv): # assign export-friendly activations
if isinstance(m.act, nn.Hardswish):
m.act = Hardswish()
elif isinstance(m.act, nn.SiLU):
m.act = SiLU()
# elif isinstance(m, models.yolo.Detect):
# m.forward = m.forward_export # assign forward (optional)
model.model[-1].export = True # set Detect() layer grid export
for _ in range(2):
y = model(img) # dry runs
output_names = None
print(f'starting export with onnx {onnx.__version__}...')
f = opt.weights.replace('.pt', '.onnx') # filename
torch.onnx.export(model, img, f, verbose=False, opset_version=11, input_names=['data'],
output_names=['stride_' + str(int(x)) for x in model.stride])
# Checks
model_onnx = onnx.load(f) # load onnx model
onnx.checker.check_model(model_onnx) # check onnx model
# print(onnx.helper.printable_graph(model_onnx.graph)) # print
# Finish
print(f'\nExport complete ({time.time() - t:.2f}s). Visualize with https://github.com/lutzroeder/netron.')
|
2301_81045437/yolov7_plate
|
ncnn/ncnn_export.py
|
Python
|
unknown
| 3,172
|
import onnxruntime
import numpy as np
import cv2
import copy
import os
import argparse
from PIL import Image, ImageDraw, ImageFont
import time
plateName=r"#京沪津渝冀晋蒙辽吉黑苏浙皖闽赣鲁豫鄂湘粤桂琼川贵云藏陕甘青宁新学警港澳挂使领民航危0123456789ABCDEFGHJKLMNPQRSTUVWXYZ险品"
mean_value, std_value = (0.588, 0.193)  # recognition model mean / std
def decodePlate(preds): # recognition post-processing: CTC-style collapse
pre=0
newPreds=[]
for i in range(len(preds)):
if preds[i]!=0 and preds[i]!=pre:
newPreds.append(preds[i])
pre=preds[i]
plate=""
for i in newPreds:
plate+=plateName[int(i)]
return plate
# return newPreds
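# --- illustrative sketch (added): decodePlate is a greedy CTC-style collapse:
# drop blanks (index 0) and consecutive repeats, then map the surviving
# indices through plateName. The index list below is made up.
def _decode_sketch():
    preds = [0, 1, 1, 0, 45, 45, 46]  # hypothetical per-position argmax indices
    return decodePlate(preds)         # keeps indices [1, 45, 46] -> "京34"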
def rec_pre_precessing(img,size=(48,168)): # recognition pre-processing
    img = cv2.resize(img,(168,48))
    img = img.astype(np.float32)
    img = (img/255-mean_value)/std_value  # normalize: subtract mean, divide by std
    img = img.transpose(2,0,1)            # h,w,c -> c,h,w
    img = img.reshape(1,*img.shape)       # c,h,w -> batch,c,h,w
return img
def get_plate_result(img,session_rec): # run the recognition model and decode the plate
img =rec_pre_precessing(img)
y_onnx = session_rec.run([session_rec.get_outputs()[0].name], {session_rec.get_inputs()[0].name: img})[0]
# print(y_onnx[0])
    index = np.argmax(y_onnx[0],axis=1)  # index of the most probable character at each position
# print(y_onnx[0])
plate_no = decodePlate(index)
# plate_no = decodePlate(y_onnx[0])
return plate_no
def allFilePath(rootPath,allFIleList): # recursively collect all files under rootPath
fileList = os.listdir(rootPath)
for temp in fileList:
if os.path.isfile(os.path.join(rootPath,temp)):
allFIleList.append(os.path.join(rootPath,temp))
else:
allFilePath(os.path.join(rootPath,temp),allFIleList)
def get_split_merge(img): # split a double-layer plate and merge it into one row for recognition
h,w,c = img.shape
img_upper = img[0:int(5/12*h),:]
img_lower = img[int(1/3*h):,:]
img_upper = cv2.resize(img_upper,(img_lower.shape[1],img_lower.shape[0]))
new_img = np.hstack((img_upper,img_lower))
return new_img
def order_points(pts): # order corner points as (top-left, top-right, bottom-right, bottom-left)
rect = np.zeros((4, 2), dtype = "float32")
s = pts.sum(axis = 1)
rect[0] = pts[np.argmin(s)]
rect[2] = pts[np.argmax(s)]
diff = np.diff(pts, axis = 1)
rect[1] = pts[np.argmin(diff)]
rect[3] = pts[np.argmax(diff)]
return rect
def four_point_transform(image, pts): # perspective transform to a rectified crop for easier recognition
rect = order_points(pts)
(tl, tr, br, bl) = rect
widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
maxWidth = max(int(widthA), int(widthB))
heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
maxHeight = max(int(heightA), int(heightB))
dst = np.array([
[0, 0],
[maxWidth - 1, 0],
[maxWidth - 1, maxHeight - 1],
[0, maxHeight - 1]], dtype = "float32")
M = cv2.getPerspectiveTransform(rect, dst)
warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))
# return the warped image
return warped
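# --- illustrative sketch (added): rectifying a hypothetical tilted
# quadrilateral crop; the corner coordinates are made-up values.
def _warp_sketch():
    img = np.zeros((200, 300, 3), dtype=np.uint8)
    pts = np.array([[30, 40], [200, 50], [205, 110], [25, 100]], dtype="float32")
    return four_point_transform(img, pts)  # ~180x60 fronto-parallel crop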
def my_letter_box(img,size=(640,640)): # letterbox: keep aspect ratio, pad with gray (114)
h,w,c = img.shape
r = min(size[0]/h,size[1]/w)
new_h,new_w = int(h*r),int(w*r)
top = int((size[0]-new_h)/2)
left = int((size[1]-new_w)/2)
bottom = size[0]-new_h-top
right = size[1]-new_w-left
img_resize = cv2.resize(img,(new_w,new_h))
img = cv2.copyMakeBorder(img_resize,top,bottom,left,right,borderType=cv2.BORDER_CONSTANT,value=(114,114,114))
return img,r,left,top
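# --- illustrative sketch (added): letterboxing a 640x480 image keeps the
# scale (r = 1.0) and pads 80 gray rows at top and bottom.
def _letterbox_sketch():
    img = np.zeros((480, 640, 3), dtype=np.uint8)
    out, r, left, top = my_letter_box(img, (640, 640))
    assert out.shape == (640, 640, 3) and r == 1.0 and (left, top) == (0, 80)
    return out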
def xywh2xyxy(boxes): # convert center-format xywh to corner-format x1,y1,x2,y2
xywh =copy.deepcopy(boxes)
xywh[:,0]=boxes[:,0]-boxes[:,2]/2
xywh[:,1]=boxes[:,1]-boxes[:,3]/2
xywh[:,2]=boxes[:,0]+boxes[:,2]/2
xywh[:,3]=boxes[:,1]+boxes[:,3]/2
return xywh
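# --- illustrative sketch (added): center-format [cx,cy,w,h] = [50,50,20,10]
# becomes corner-format [40,45,60,55]; extra columns pass through unchanged.
def _xywh_sketch():
    boxes = np.array([[50., 50., 20., 10., 0.9]])
    return xywh2xyxy(boxes)  # [[40., 45., 60., 55., 0.9]]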
def my_nms(boxes,iou_thresh): #nms
index = np.argsort(boxes[:,4])[::-1]
keep = []
while index.size >0:
i = index[0]
keep.append(i)
x1=np.maximum(boxes[i,0],boxes[index[1:],0])
y1=np.maximum(boxes[i,1],boxes[index[1:],1])
x2=np.minimum(boxes[i,2],boxes[index[1:],2])
y2=np.minimum(boxes[i,3],boxes[index[1:],3])
w = np.maximum(0,x2-x1)
h = np.maximum(0,y2-y1)
inter_area = w*h
union_area = (boxes[i,2]-boxes[i,0])*(boxes[i,3]-boxes[i,1])+(boxes[index[1:],2]-boxes[index[1:],0])*(boxes[index[1:],3]-boxes[index[1:],1])
iou = inter_area/(union_area-inter_area)
idx = np.where(iou<=iou_thresh)[0]
index = index[idx+1]
return keep
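# --- illustrative sketch (added): my_nms keeps the highest-scoring box of
# each overlapping cluster. The boxes below are made up.
def _nms_sketch():
    boxes = np.array([[ 0.,  0., 10., 10., 0.9],   # kept (highest score)
                      [ 1.,  1., 11., 11., 0.8],   # suppressed (IoU ~0.68)
                      [50., 50., 60., 60., 0.7]])  # kept (no overlap)
    return my_nms(boxes, iou_thresh=0.5)           # -> [0, 2]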
def restore_box(boxes,r,left,top): # map boxes and landmarks back to original-image coordinates
boxes[:,[0,2,5,7,9,11]]-=left
boxes[:,[1,3,6,8,10,12]]-=top
boxes[:,[0,2,5,7,9,11]]/=r
boxes[:,[1,3,6,8,10,12]]/=r
return boxes
def restore_box_2(boxes,r,left,top): # same mapping for outputs carrying only two landmark points
boxes[:,[0,2,5,7]]-=left
boxes[:,[1,3,6,8]]-=top
boxes[:,[0,2,5,7]]/=r
boxes[:,[1,3,6,8]]/=r
return boxes
def detect_pre_precessing(img,img_size): # detection pre-processing
img,r,left,top=my_letter_box(img,img_size)
# cv2.imwrite("1.jpg",img)
img =img[:,:,::-1].transpose(2,0,1).copy().astype(np.float32)
img=img/255
img=img.reshape(1,*img.shape)
return img,r,left,top
def post_precessing(dets,r,left,top,conf_thresh=0.3,iou_thresh=0.45): # detection post-processing
num_cls=2
choice = dets[:,:,4]>conf_thresh
dets=dets[choice]
    dets[:,5:5+num_cls]*=dets[:,4:5]  # columns 5:5+num_cls hold the 2 class scores; multiply by objectness
box = dets[:,:4]
boxes = xywh2xyxy(box)
score= np.max(dets[:,5:5+num_cls],axis=-1,keepdims=True)
index = np.argmax(dets[:,5:5+num_cls],axis=-1).reshape(-1,1)
kpt_b=5+num_cls
    landmarks=dets[:,[kpt_b,kpt_b+1,kpt_b+3,kpt_b+4,kpt_b+6,kpt_b+7,kpt_b+9,kpt_b+10]]  # yolov7 keypoints carry three values (x, y, score); only x, y are kept
# landmarks=dets[:,[kpt_b,kpt_b+1,kpt_b+3,kpt_b+4]]
output = np.concatenate((boxes,score,landmarks,index),axis=1)
reserve_=my_nms(output,iou_thresh)
output=output[reserve_]
output = restore_box(output,r,left,top)
# output = restore_box_2(output,r,left,top)
return output
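# Output column layout of post_precessing (read off the concatenate above):
# cols 0-3 box x1,y1,x2,y2 | col 4 score | cols 5-12 four landmark (x,y)
# pairs | col 13 class index (0 single-layer, 1 double-layer plate).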
def rec_plate(outputs,img0,session_rec): # recognize each detected plate
dict_list=[]
for output in outputs:
result_dict={}
rect=output[:4].tolist()
land_marks = output[5:13].reshape(4,2)
# landmarks = [landmarks[0],landmarks[1],landmarks[3],landmarks[4],landmarks[6],landmarks[7],landmarks[9],landmarks[10]]
roi_img = four_point_transform(img0,land_marks)
label = int(output[-1])
score = output[4]
        if label==1:  # label 1 means a double-layer plate
roi_img = get_split_merge(roi_img)
plate_no = get_plate_result(roi_img,session_rec)
result_dict['rect']=rect
result_dict['landmarks']=land_marks.tolist()
result_dict['plate_no']=plate_no
result_dict['roi_height']=roi_img.shape[0]
dict_list.append(result_dict)
return dict_list
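# Each dict returned by rec_plate carries: 'rect' [x1,y1,x2,y2], 'landmarks'
# (four corner points), 'plate_no' (decoded string) and 'roi_height', which
# draw_result below uses to size the rendered text.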
def cv2ImgAddText(img, text, left, top, textColor=(0, 255, 0), textSize=20): # draw the recognition result on the image
    if isinstance(img, np.ndarray):  # OpenCV image (numpy array)?
img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
draw = ImageDraw.Draw(img)
fontText = ImageFont.truetype(
"fonts/platech.ttf", textSize, encoding="utf-8")
draw.text((left, top), text, textColor, font=fontText)
return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)
def draw_result(orgimg,dict_list):
result_str =""
for result in dict_list:
rect_area = result['rect']
x,y,w,h = rect_area[0],rect_area[1],rect_area[2]-rect_area[0],rect_area[3]-rect_area[1]
padding_w = 0.05*w
padding_h = 0.11*h
rect_area[0]=max(0,int(x-padding_w))
        rect_area[1]=max(0,int(y-padding_h))
        rect_area[2]=min(orgimg.shape[1],int(rect_area[2]+padding_w))
rect_area[3]=min(orgimg.shape[0],int(rect_area[3]+padding_h))
height_area = result['roi_height']
landmarks=result['landmarks']
result = result['plate_no']
result_str+=result+" "
        for i in range(4):  # keypoints
            cv2.circle(orgimg, (int(landmarks[i][0]), int(landmarks[i][1])), 5, clors[i], -1)
        cv2.rectangle(orgimg,(rect_area[0],rect_area[1]),(rect_area[2],rect_area[3]),(0,0,255),2)  # draw box
if len(result)>6:
orgimg=cv2ImgAddText(orgimg,result,rect_area[0]-height_area,rect_area[1]-height_area-10,(255,0,0),height_area)
print(result_str)
return orgimg
if __name__ == "__main__":
parser = argparse.ArgumentParser()
    parser.add_argument('--detect_model',type=str, default=r'weights/yolov7-lite-s.onnx', help='model.pt path(s)')  # detection model
    parser.add_argument('--rec_model', type=str, default='weights/plate_rec.onnx', help='model.pt path(s)')  # recognition model
parser.add_argument('--image_path', type=str, default=r'pic', help='source')
parser.add_argument('--img_size', type=int, default=640, help='inference size (pixels)')
parser.add_argument('--output', type=str, default='result', help='source')
opt = parser.parse_args()
file_list = []
allFilePath(opt.image_path,file_list)
providers = ['CPUExecutionProvider']
clors = [(255,0,0),(0,255,0),(0,0,255),(255,255,0),(0,255,255)]
img_size = (opt.img_size,opt.img_size)
session_detect = onnxruntime.InferenceSession(opt.detect_model, providers=providers )
session_rec = onnxruntime.InferenceSession(opt.rec_model, providers=providers )
if not os.path.exists(opt.output):
os.mkdir(opt.output)
save_path = opt.output
count = 0
begin = time.time()
for pic_ in file_list:
count+=1
print(count,pic_,end=" ")
img=cv2.imread(pic_)
img0 = copy.deepcopy(img)
        img,r,left,top = detect_pre_precessing(img,img_size)  # detection pre-processing
# print(img.shape)
y_onnx = session_detect.run([session_detect.get_outputs()[0].name], {session_detect.get_inputs()[0].name: img})[0]
        outputs = post_precessing(y_onnx,r,left,top)  # detection post-processing
result_list=rec_plate(outputs,img0,session_rec)
ori_img = draw_result(img0,result_list)
img_name = os.path.basename(pic_)
save_img_path = os.path.join(save_path,img_name)
cv2.imwrite(save_img_path,ori_img)
print(f"总共耗时{time.time()-begin} s")
|
2301_81045437/yolov7_plate
|
onnx/yolov7_plate_onnx_infer.py
|
Python
|
unknown
| 10,679
|
import os
import cv2
import numpy as np
def get_split_merge(img):
h,w,c = img.shape
img_upper = img[0:int(5/12*h),:]
img_lower = img[int(1/3*h):,:]
img_upper = cv2.resize(img_upper,(img_lower.shape[1],img_lower.shape[0]))
new_img = np.hstack((img_upper,img_lower))
return new_img
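# --- illustrative sketch (added): for a 120x200 double-layer plate the upper
# 5/12 (50 rows) is resized to the lower 2/3's size (80 rows) and the halves
# are stacked side by side into one 80x400 single-row image.
def _split_merge_sketch():
    img = np.zeros((120, 200, 3), dtype=np.uint8)
    out = get_split_merge(img)
    assert out.shape == (80, 400, 3)
    return out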
if __name__=="__main__":
img = cv2.imread("double_plate/tmp8078.png")
new_img =get_split_merge(img)
cv2.imwrite("double_plate/new.jpg",new_img)
|
2301_81045437/yolov7_plate
|
plate_recognition/double_plate_split_merge.py
|
Python
|
unknown
| 461
|
import torch.nn as nn
import torch
import torch.nn.functional as F  # needed by myNet_ocr_color.forward (F.log_softmax)
class myNet_ocr(nn.Module):
def __init__(self,cfg=None,num_classes=78,export=False):
super(myNet_ocr, self).__init__()
if cfg is None:
cfg =[32,32,64,64,'M',128,128,'M',196,196,'M',256,256]
# cfg =[32,32,'M',64,64,'M',128,128,'M',256,256]
self.feature = self.make_layers(cfg, True)
self.export = export
# self.classifier = nn.Linear(cfg[-1], num_classes)
# self.loc = nn.MaxPool2d((2, 2), (5, 1), (0, 1),ceil_mode=True)
# self.loc = nn.AvgPool2d((2, 2), (5, 2), (0, 1),ceil_mode=False)
self.loc = nn.MaxPool2d((5, 2), (1, 1),(0,1),ceil_mode=False)
self.newCnn=nn.Conv2d(cfg[-1],num_classes,1,1)
# self.newBn=nn.BatchNorm2d(num_classes)
def make_layers(self, cfg, batch_norm=False):
layers = []
in_channels = 3
for i in range(len(cfg)):
if i == 0:
conv2d =nn.Conv2d(in_channels, cfg[i], kernel_size=5,stride =1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(cfg[i]), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = cfg[i]
else :
if cfg[i] == 'M':
layers += [nn.MaxPool2d(kernel_size=3, stride=2,ceil_mode=True)]
else:
conv2d = nn.Conv2d(in_channels, cfg[i], kernel_size=3, padding=(1,1),stride =1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(cfg[i]), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = cfg[i]
return nn.Sequential(*layers)
def forward(self, x):
x = self.feature(x)
x=self.loc(x)
x=self.newCnn(x)
# x=self.newBn(x)
if self.export:
            conv = x.squeeze(2)         # b, num_classes, w
            conv = conv.transpose(2,1)  # b, w, num_classes
# conv =conv.argmax(dim=2)
return conv
else:
b, c, h, w = x.size()
assert h == 1, "the height of conv must be 1"
            conv = x.squeeze(2)  # b, num_classes, w
conv = conv.permute(2, 0, 1) # [w, b, c]
# output = F.log_softmax(self.rnn(conv), dim=2)
output = torch.softmax(conv, dim=2)
return output
myCfg = [32,'M',64,'M',96,'M',128,'M',256]
class myNet(nn.Module):
def __init__(self,cfg=None,num_classes=3):
super(myNet, self).__init__()
if cfg is None:
cfg = myCfg
self.feature = self.make_layers(cfg, True)
self.classifier = nn.Linear(cfg[-1], num_classes)
def make_layers(self, cfg, batch_norm=False):
layers = []
in_channels = 3
for i in range(len(cfg)):
if i == 0:
conv2d =nn.Conv2d(in_channels, cfg[i], kernel_size=5,stride =1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(cfg[i]), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = cfg[i]
else :
if cfg[i] == 'M':
layers += [nn.MaxPool2d(kernel_size=3, stride=2,ceil_mode=True)]
else:
conv2d = nn.Conv2d(in_channels, cfg[i], kernel_size=3, padding=1,stride =1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(cfg[i]), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = cfg[i]
return nn.Sequential(*layers)
def forward(self, x):
x = self.feature(x)
x = nn.AvgPool2d(kernel_size=3, stride=1)(x)
x = x.view(x.size(0), -1)
y = self.classifier(x)
return y
class MyNet_color(nn.Module):
def __init__(self, class_num=6):
super(MyNet_color, self).__init__()
self.class_num = class_num
self.backbone = nn.Sequential(
nn.Conv2d(in_channels=3, out_channels=16, kernel_size=(5, 5), stride=(1, 1)), # 0
torch.nn.BatchNorm2d(16),
nn.ReLU(),
nn.MaxPool2d(kernel_size=(2, 2)),
nn.Dropout(0),
nn.Flatten(),
nn.Linear(480, 64),
nn.Dropout(0),
nn.ReLU(),
nn.Linear(64, class_num),
nn.Dropout(0),
nn.Softmax(1)
)
def forward(self, x):
logits = self.backbone(x)
return logits
class myNet_ocr_color(nn.Module):
def __init__(self,cfg=None,num_classes=78,export=False,color_num=None):
super(myNet_ocr_color, self).__init__()
if cfg is None:
cfg =[32,32,64,64,'M',128,128,'M',196,196,'M',256,256]
# cfg =[32,32,'M',64,64,'M',128,128,'M',256,256]
self.feature = self.make_layers(cfg, True)
self.export = export
self.color_num=color_num
        self.conv_out_num=12  # first conv of the color branch outputs 12 channels
if self.color_num:
self.conv1=nn.Conv2d(cfg[-1],self.conv_out_num,kernel_size=3,stride=2)
self.bn1=nn.BatchNorm2d(self.conv_out_num)
self.relu1=nn.ReLU(inplace=True)
self.gap =nn.AdaptiveAvgPool2d(output_size=1)
self.color_classifier=nn.Conv2d(self.conv_out_num,self.color_num,kernel_size=1,stride=1)
self.color_bn = nn.BatchNorm2d(self.color_num)
self.flatten = nn.Flatten()
self.loc = nn.MaxPool2d((5, 2), (1, 1),(0,1),ceil_mode=False)
self.newCnn=nn.Conv2d(cfg[-1],num_classes,1,1)
# self.newBn=nn.BatchNorm2d(num_classes)
def make_layers(self, cfg, batch_norm=False):
layers = []
in_channels = 3
for i in range(len(cfg)):
if i == 0:
conv2d =nn.Conv2d(in_channels, cfg[i], kernel_size=5,stride =1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(cfg[i]), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = cfg[i]
else :
if cfg[i] == 'M':
layers += [nn.MaxPool2d(kernel_size=3, stride=2,ceil_mode=True)]
else:
conv2d = nn.Conv2d(in_channels, cfg[i], kernel_size=3, padding=(1,1),stride =1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(cfg[i]), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = cfg[i]
return nn.Sequential(*layers)
def forward(self, x):
x = self.feature(x)
if self.color_num:
x_color=self.conv1(x)
x_color=self.bn1(x_color)
x_color =self.relu1(x_color)
x_color = self.color_classifier(x_color)
x_color = self.color_bn(x_color)
x_color =self.gap(x_color)
x_color = self.flatten(x_color)
x=self.loc(x)
x=self.newCnn(x)
if self.export:
            conv = x.squeeze(2)         # b, num_classes, w
            conv = conv.transpose(2,1)  # b, w, num_classes
if self.color_num:
return conv,x_color
return conv
else:
b, c, h, w = x.size()
assert h == 1, "the height of conv must be 1"
            conv = x.squeeze(2)  # b, num_classes, w
conv = conv.permute(2, 0, 1) # [w, b, c]
output = F.log_softmax(conv, dim=2)
if self.color_num:
return output,x_color
return output
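# --- illustrative sketch (added): with the color head enabled, export mode
# returns a pair (ctc_logits, color_logits). The shapes below assume the
# default cfg and a 48x168 input, matching the demo in __main__ below.
def _color_head_sketch():
    m = myNet_ocr_color(num_classes=78, export=True, color_num=5)
    seq, col = m(torch.randn(1, 3, 48, 168))
    return seq.shape, col.shape  # torch.Size([1, 21, 78]), torch.Size([1, 5])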
if __name__ == '__main__':
x = torch.randn(1,3,48,216)
model = myNet_ocr(num_classes=78,export=True)
out = model(x)
print(out.shape)
|
2301_81045437/yolov7_plate
|
plate_recognition/plateNet.py
|
Python
|
unknown
| 8,019
|
from plate_recognition.plateNet import myNet_ocr,myNet_ocr_color
import torch
import torch.nn as nn
import cv2
import numpy as np
import os
import time
import sys
def cv_imread(path): # read images whose path may contain non-ASCII characters
img=cv2.imdecode(np.fromfile(path,dtype=np.uint8),-1)
return img
def allFilePath(rootPath,allFIleList):
fileList = os.listdir(rootPath)
for temp in fileList:
if os.path.isfile(os.path.join(rootPath,temp)):
if temp.endswith('.jpg') or temp.endswith('.png') or temp.endswith('.JPG'):
allFIleList.append(os.path.join(rootPath,temp))
else:
allFilePath(os.path.join(rootPath,temp),allFIleList)
device = torch.device('cuda') if torch.cuda.is_available() else torch.device("cpu")
color=['黑色','蓝色','绿色','白色','黄色']  # plate colors: black, blue, green, white, yellow
plateName=r"#京沪津渝冀晋蒙辽吉黑苏浙皖闽赣鲁豫鄂湘粤桂琼川贵云藏陕甘青宁新学警港澳挂使领民航危0123456789ABCDEFGHJKLMNPQRSTUVWXYZ险品"
mean_value,std_value=(0.588,0.193)
def decodePlate(preds):
pre=0
newPreds=[]
index=[]
for i in range(len(preds)):
if preds[i]!=0 and preds[i]!=pre:
newPreds.append(preds[i])
index.append(i)
pre=preds[i]
return newPreds,index
def image_processing(img,device):
img = cv2.resize(img, (168,48))
img = np.reshape(img, (48, 168, 3))
# normalize
img = img.astype(np.float32)
img = (img / 255. - mean_value) / std_value
img = img.transpose([2, 0, 1])
img = torch.from_numpy(img)
img = img.to(device)
img = img.view(1, *img.size())
return img
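# --- illustrative sketch (added): preprocessing maps any BGR uint8 crop to a
# normalized 1x3x48x168 float tensor on the target device.
def _preprocess_sketch():
    dummy = np.zeros((100, 300, 3), dtype=np.uint8)
    t = image_processing(dummy, torch.device("cpu"))
    assert tuple(t.shape) == (1, 3, 48, 168)
    return t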
def get_plate_result(img,device,model,is_color=True):
input = image_processing(img,device)
    if is_color:  # also predict the plate color
preds,color_preds = model(input)
color_preds = torch.softmax(color_preds,dim=-1)
color_conf,color_index = torch.max(color_preds,dim=-1)
color_conf=color_conf.item()
else:
preds = model(input)
preds=torch.softmax(preds,dim=-1)
prob,index=preds.max(dim=-1)
index = index.view(-1).detach().cpu().numpy()
prob=prob.view(-1).detach().cpu().numpy()
# preds=preds.view(-1).detach().cpu().numpy()
newPreds,new_index=decodePlate(index)
prob=prob[new_index]
plate=""
for i in newPreds:
plate+=plateName[i]
# if not (plate[0] in plateName[1:44] ):
# return ""
if is_color:
        return plate,prob,color[color_index],color_conf  # plate string, per-char probs, color, color confidence
else:
return plate,prob
def init_model(device,model_path,is_color = True):
# print( print(sys.path))
# model_path ="plate_recognition/model/checkpoint_61_acc_0.9715.pth"
check_point = torch.load(model_path,map_location=device)
model_state=check_point['state_dict']
cfg=check_point['cfg']
color_classes=0
if is_color:
        color_classes=5  # number of color classes
model = myNet_ocr_color(num_classes=len(plateName),export=True,cfg=cfg,color_num=color_classes)
model.load_state_dict(model_state,strict=False)
model.to(device)
model.eval()
return model
# model = init_model(device)
if __name__ == '__main__':
model_path = r"weights/plate_rec_color.pth"
image_path ="images/tmp2424.png"
testPath = r"/mnt/Gpan/Mydata/pytorchPorject/CRNN/crnn_plate_recognition/images"
fileList=[]
allFilePath(testPath,fileList)
# result = get_plate_result(image_path,device)
# print(result)
is_color = False
model = init_model(device,model_path,is_color=is_color)
right=0
begin = time.time()
for imge_path in fileList:
img=cv2.imread(imge_path)
if is_color:
plate,_,plate_color,_=get_plate_result(img,device,model,is_color=is_color)
print(plate)
else:
plate,_=get_plate_result(img,device,model,is_color=is_color)
print(plate,imge_path)
|
2301_81045437/yolov7_plate
|
plate_recognition/plate_rec.py
|
Python
|
unknown
| 4,004
|
import os
def allFilePath(rootPath,allFIleList):
fileList = os.listdir(rootPath)
for temp in fileList:
if os.path.isfile(os.path.join(rootPath,temp)):
allFIleList.append(os.path.join(rootPath,temp))
else:
allFilePath(os.path.join(rootPath,temp),allFIleList)
val_path =r"/mnt/Gpan/Mydata/pytorchPorject/datasets/ccpd/val_detect/danger_data_val/"
file_list = []
allFilePath(val_path,file_list)
for file_ in file_list:
new_file = file_.replace(" ","")
print(file_,new_file)
os.rename(file_,new_file)
|
2301_81045437/yolov7_plate
|
read_image.py
|
Python
|
unknown
| 561
|
cmake_minimum_required(VERSION 3.10)
project(plate_rec)
add_definitions(-std=c++11)
add_definitions(-w)
# option(CUDA_USE_STATIC_CUDA_RUNTIME OFF)
find_package(CUDA REQUIRED)
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_BUILD_TYPE Release)
#cuda
include_directories(/mnt/Gu/softWare/cuda-11.0/targets/x86_64-linux/include)
link_directories(/mnt/Gu/softWare/cuda-11.0/targets/x86_64-linux/lib)
#tensorrt
include_directories(/mnt/Gpan/tensorRT/TensorRT-8.2.0.6//include/)
link_directories(/mnt/Gpan/tensorRT/TensorRT-8.2.0.6/lib/)
#opencv
find_package(OpenCV)
include_directories(${OpenCV_INCLUDE_DIRS})
include_directories(${PROJECT_SOURCE_DIR}/include)
#onnx2trt
add_subdirectory(${PROJECT_SOURCE_DIR}/onnx2trt)
cuda_add_executable(plate_rec detect_rec_plate.cpp utils.cpp preprocess.cu)
target_link_libraries(plate_rec nvinfer)
target_link_libraries(plate_rec cudart)
target_link_libraries(plate_rec nvonnxparser)
target_link_libraries(plate_rec ${OpenCV_LIBS})
add_definitions(-O2 -pthread)
|
2301_81045437/yolov7_plate
|
tensorrt/CMakeLists.txt
|
CMake
|
unknown
| 995
|
#include <fstream>
#include <iostream>
#include <sstream>
#include <numeric>
#include <chrono>
#include <vector>
#include <opencv2/opencv.hpp>
#include <dirent.h>
#include "NvInfer.h"
#include "cuda_runtime_api.h"
#include "logging.h"
#include "include/utils.hpp"
#include "preprocess.h"
#define MAX_IMAGE_INPUT_SIZE_THRESH 5000 * 5000
struct bbox
{
float x1,x2,y1,y2;
float landmarks[8];
float score;
};
bool my_func(bbox a,bbox b)
{
return a.score>b.score;
}
float get_IOU(bbox a,bbox b)
{
float x1 = std::max(a.x1,b.x1);
float x2 = std::min(a.x2,b.x2);
float y1 = std::max(a.y1,b.y1);
float y2 = std::min(a.y2,b.y2);
float w = std::max(0.0f,x2-x1);
float h = std::max(0.0f,y2-y1);
float inter_area = w*h;
float union_area = (a.x2-a.x1)*(a.y2-a.y1)+(b.x2-b.x1)*(b.y2-b.y1)-inter_area;
float IOU = 1.0*inter_area/ union_area;
return IOU;
}
#define CHECK(status) \
do\
{\
auto ret = (status);\
if (ret != 0)\
{\
std::cerr << "Cuda failure: " << ret << std::endl;\
abort();\
}\
} while (0)
#define DEVICE 0 // GPU id
#define NMS_THRESH 0.45
#define BBOX_CONF_THRESH 0.3
using namespace nvinfer1;
// stuff we know about the network and the input/output blobs
const std::vector<std::string> plate_string={"#","京","沪","津","渝","冀","晋","蒙","辽","吉","黑","苏","浙","皖", \
"闽","赣","鲁","豫","鄂","湘","粤","桂","琼","川","贵","云","藏","陕","甘","青","宁","新","学","警","港","澳","挂","使","领","民","航","深", \
"0","1","2","3","4","5","6","7","8","9","A","B","C","D","E","F","G","H","J","K","L","M","N","P","Q","R","S","T","U","V","W","X","Y","Z"};
const std::vector<std::string> plate_string_yinwen={"#","<beijing>","<hu>","<tianjin>","<chongqing>","<hebei>","<jing>","<meng>","<liao>","<jilin>","<hei>","<su>","<zhe>","<wan>", \
"<fujian>","<gan>","<lun>","<henan>","<hubei>","<hunan>","<yue>","<guangxi>","<qiong>","<chuan>","<guizhou>","<yun>","<zang>","<shanxi>","<gan>","<qinghai>",\
"<ning>","<xin>","<xue>","<police>","<hongkang>","<Macao>","<gua>","<shi>","<ling>","<min>","<hang>","<shen>", \
"0","1","2","3","4","5","6","7","8","9","A","B","C","D","E","F","G","H","J","K","L","M","N","P","Q","R","S","T","U","V","W","X","Y","Z"};
static const int INPUT_W = 640;
static const int INPUT_H = 640;
static const int NUM_CLASSES = 2; // two classes: single-layer and double-layer plates
const char* INPUT_BLOB_NAME = "images"; // onnx input name
const char* OUTPUT_BLOB_NAME = "output"; // onnx output name
static Logger gLogger;
cv::Mat static_resize(cv::Mat& img,int &top,int &left) // letterbox resize, same as letter_box in yolov5
{
float r = std::min(INPUT_W / (img.cols*1.0), INPUT_H / (img.rows*1.0));
// r = std::min(r, 1.0f);
int unpad_w = r * img.cols;
int unpad_h = r * img.rows;
left = (INPUT_W-unpad_w)/2;
top = (INPUT_H-unpad_h)/2;
int right = INPUT_W-unpad_w-left;
int bottom = INPUT_H-unpad_h-top;
cv::Mat re(unpad_h, unpad_w, CV_8UC3);
cv::resize(img, re, re.size());
cv::Mat out;
cv::copyMakeBorder(re,out,top,bottom,left,right,cv::BORDER_CONSTANT,cv::Scalar(114,114,114));
return out;
}
struct Object
{
    cv::Rect_<float> rect;
    float landmarks[8]; // 4 keypoints (x, y each)
int label;
float prob;
};
static inline float intersection_area(const Object& a, const Object& b)
{
cv::Rect_<float> inter = a.rect & b.rect;
return inter.area();
}
static void qsort_descent_inplace(std::vector<Object>& faceobjects, int left, int right)
{
int i = left;
int j = right;
float p = faceobjects[(left + right) / 2].prob;
while (i <= j)
{
while (faceobjects[i].prob > p)
i++;
while (faceobjects[j].prob < p)
j--;
if (i <= j)
{
// swap
std::swap(faceobjects[i], faceobjects[j]);
i++;
j--;
}
}
#pragma omp parallel sections
{
#pragma omp section
{
if (left < j) qsort_descent_inplace(faceobjects, left, j);
}
#pragma omp section
{
if (i < right) qsort_descent_inplace(faceobjects, i, right);
}
}
}
static void qsort_descent_inplace(std::vector<Object>& objects)
{
if (objects.empty())
return;
qsort_descent_inplace(objects, 0, objects.size() - 1);
}
static void nms_sorted_bboxes(const std::vector<Object>& faceobjects, std::vector<int>& picked, float nms_threshold)
{
picked.clear();
const int n = faceobjects.size();
std::vector<float> areas(n);
for (int i = 0; i < n; i++)
{
areas[i] = faceobjects[i].rect.area();
}
for (int i = 0; i < n; i++)
{
const Object& a = faceobjects[i];
int keep = 1;
for (int j = 0; j < (int)picked.size(); j++)
{
const Object& b = faceobjects[picked[j]];
// intersection over union
float inter_area = intersection_area(a, b);
float union_area = areas[i] + areas[picked[j]] - inter_area;
// float IoU = inter_area / union_area
if (inter_area / union_area > nms_threshold)
keep = 0;
}
if (keep)
picked.push_back(i);
}
}
std::vector<int> my_nms(std::vector<bbox> &bboxes, float nms_threshold)
{
std:: vector<int> choice;
for(int i = 0; i<bboxes.size(); i++)
{
int keep = 1;
for(int j = 0; j<choice.size(); j++)
{
float IOU = get_IOU(bboxes[i],bboxes[choice[j]]);
if (IOU>nms_threshold)
keep = 0;
}
if (keep)
choice.push_back(i);
}
return choice;
}
int find_max(float *prob,int num) // argmax over num scores
{
int max= 0;
for(int i=1; i<num; i++)
{
if (prob[max]<prob[i])
max = i;
}
return max;
}
static void generate_yolox_proposals(float *feat_blob, float prob_threshold,
std::vector<Object> &objects,int OUTPUT_CANDIDATES) {
const int num_class = 2;
    const int ckpt=12 ; // 12 for yolov7 (4 keypoints x 3 values), 8 for yolov5
const int num_anchors = OUTPUT_CANDIDATES;
for (int anchor_idx = 0; anchor_idx < num_anchors; anchor_idx++) {
// const int basic_pos = anchor_idx * (num_class + 5 + 1);
// float box_objectness = feat_blob[basic_pos + 4];
// int cls_id = feat_blob[basic_pos + 5];
// float score = feat_blob[basic_pos + 5 + 1 + cls_id];
// score *= box_objectness;
        const int basic_pos = anchor_idx * (num_class + 5 + ckpt); // 5 = x,y,w,h,object_score; ckpt = keypoint values
        float box_objectness = feat_blob[basic_pos + 4];
        // int cls_id = find_max(&feat_blob[basic_pos +5+ckpt],num_class); // argmax class (v5 layout)
        int cls_id = find_max(&feat_blob[basic_pos +5],num_class); // argmax class (v7 layout)
        // float score = feat_blob[basic_pos + 5 +8 + cls_id]; // v5
        float score = feat_blob[basic_pos + 5 + cls_id]; // v7
score *= box_objectness;
if (score > prob_threshold) {
// yolox/models/yolo_head.py decode logic
float x_center = feat_blob[basic_pos + 0];
float y_center = feat_blob[basic_pos + 1];
float w = feat_blob[basic_pos + 2];
float h = feat_blob[basic_pos + 3];
float x0 = x_center - w * 0.5f;
float y0 = y_center - h * 0.5f;
// float *landmarks=&feat_blob[basic_pos +5]; //v5
float *landmarks=&feat_blob[basic_pos +5+num_class];
Object obj;
obj.rect.x = x0;
obj.rect.y = y0;
obj.rect.width = w;
obj.rect.height = h;
obj.label = cls_id;
obj.prob = score;
int k = 0;
// for (int i = 0; i<ckpt; i++)
// {
// obj.landmarks[k++]=landmarks[i];
// }
obj.landmarks[0]=landmarks[0];
obj.landmarks[1]=landmarks[1];
obj.landmarks[2]=landmarks[3];
obj.landmarks[3]=landmarks[4];
obj.landmarks[4]=landmarks[6];
obj.landmarks[5]=landmarks[7];
obj.landmarks[6]=landmarks[9];
obj.landmarks[7]=landmarks[10];
objects.push_back(obj);
}
}
}
static void generate_proposals(float *feat_blob, float prob_threshold,
std::vector<bbox> &bboxes,int OUTPUT_CANDIDATES) {
const int num_class = 3;
const int num_anchors = OUTPUT_CANDIDATES;
for (int anchor_idx = 0; anchor_idx < num_anchors; anchor_idx++) {
// const int basic_pos = anchor_idx * (num_class + 5 + 1);
// float box_objectness = feat_blob[basic_pos + 4];
// int cls_id = feat_blob[basic_pos + 5];
// float score = feat_blob[basic_pos + 5 + 1 + cls_id];
// score *= box_objectness;
        const int basic_pos = anchor_idx * (num_class + 5 + 8); // 5 = x,y,w,h,object_score; 8 = 4 keypoint (x,y) pairs
        float box_objectness = feat_blob[basic_pos + 4];
        int cls_id = find_max(&feat_blob[basic_pos +5+8],num_class); // argmax class
        float score = feat_blob[basic_pos + 5 +8 + cls_id];
score *= box_objectness;
if (score > prob_threshold) {
// yolox/models/yolo_head.py decode logic
float x_center = feat_blob[basic_pos + 0];
float y_center = feat_blob[basic_pos + 1];
float w = feat_blob[basic_pos + 2];
float h = feat_blob[basic_pos + 3];
float x0 = x_center - w * 0.5f;
float y0 = y_center - h * 0.5f;
float *landmarks=&feat_blob[basic_pos +5];
bbox obj;
obj.x1=x0;
obj.y1=y0;
obj.x2=x0+w;
obj.y2=y0+h;
obj.score = score;
for (int i = 0; i<8; i++)
{
obj.landmarks[i]=landmarks[i];
}
bboxes.push_back(obj);
}
}
}
float* blobFromImage(cv::Mat& img){
float* blob = new float[img.total()*3];
int channels = 3;
int img_h = img.rows;
int img_w = img.cols;
int k = 0;
for (size_t c = 0; c < channels; c++)
{
for (size_t h = 0; h < img_h; h++)
{
for (size_t w = 0; w < img_w; w++)
{
// blob[c * img_w * img_h + h * img_w + w] =
// (float)img.at<cv::Vec3b>(h, w)[c];
blob[k++] =
(float)img.at<cv::Vec3b>(h, w)[2-c]/255.0;
}
}
}
return blob;
}
void blobFromImage_plate(cv::Mat& img,float mean_value,float std_value,float *blob)
{
// float* blob = new float[img.total()*3];
// int channels = NUM_CLASSES;
int img_h = img.rows;
int img_w = img.cols;
int k = 0;
for (size_t c = 0; c <3; c++)
{
for (size_t h = 0; h < img_h; h++)
{
for (size_t w = 0; w < img_w; w++)
{
blob[k++] =
((float)img.at<cv::Vec3b>(h, w)[c]/255.0-mean_value)/std_value;
}
}
}
// return blob;
}
static void decode_outputs(float* prob, std::vector<Object>& objects, float scale, const int img_w, const int img_h,int OUTPUT_CANDIDATES,int top,int left) {
std::vector<Object> proposals;
std::vector<bbox> bboxes;
generate_yolox_proposals(prob, BBOX_CONF_THRESH, proposals,OUTPUT_CANDIDATES);
// generate_proposals(prob, BBOX_CONF_THRESH, bboxes,OUTPUT_CANDIDATES);
// std::cout << "num of boxes before nms: " << proposals.size() << std::endl;
qsort_descent_inplace(proposals);
// std::sort(bboxes.begin(),bboxes.end(),my_func);
std::vector<int> picked;
nms_sorted_bboxes(proposals, picked, NMS_THRESH);
// auto choice =my_nms(bboxes, NMS_THRESH);
int count = picked.size();
// std::cout << "num of boxes: " << count << std::endl;
objects.resize(count);
for (int i = 0; i < count; i++)
{
objects[i] = proposals[picked[i]];
// adjust offset to original unpadded
float x0 = (objects[i].rect.x-left) / scale;
float y0 = (objects[i].rect.y-top) / scale;
float x1 = (objects[i].rect.x + objects[i].rect.width-left) / scale;
float y1 = (objects[i].rect.y + objects[i].rect.height-top) / scale;
float *landmarks = objects[i].landmarks;
for(int i= 0; i<8; i++)
{
if(i%2==0)
landmarks[i]=(landmarks[i]-left)/scale;
else
landmarks[i]=(landmarks[i]-top)/scale;
}
// clip
x0 = std::max(std::min(x0, (float)(img_w - 1)), 0.f);
y0 = std::max(std::min(y0, (float)(img_h - 1)), 0.f);
x1 = std::max(std::min(x1, (float)(img_w - 1)), 0.f);
y1 = std::max(std::min(y1, (float)(img_h - 1)), 0.f);
objects[i].rect.x = x0;
objects[i].rect.y = y0;
objects[i].rect.width = x1 - x0;
objects[i].rect.height = y1 - y0;
}
}
const float color_list[4][3] =
{
{255, 0, 0},
{0, 255, 0},
{0, 0, 255},
{0, 255, 255},
};
static void draw_objects(const cv::Mat& bgr, const std::vector<Object>& objects, std::string f)
{
static const char* class_names[] = {
"person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light",
"fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow",
"elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee",
"skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard",
"tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple",
"sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch",
"potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone",
"microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear",
"hair drier", "toothbrush"
};
cv::Mat image = bgr.clone();
for (size_t i = 0; i < objects.size(); i++)
{
const Object& obj = objects[i];
// fprintf(stderr, "%d = %.5f at %.2f %.2f %.2f x %.2f\n", obj.label, obj.prob,
// obj.rect.x, obj.rect.y, obj.rect.width, obj.rect.height);
cv::Scalar color = cv::Scalar(color_list[obj.label][0], color_list[obj.label][1], color_list[obj.label][2]);
float c_mean = cv::mean(color)[0];
cv::Scalar txt_color;
if (c_mean > 0.5){
txt_color = cv::Scalar(0, 0, 0);
}else{
txt_color = cv::Scalar(255, 255, 255);
}
cv::rectangle(image, obj.rect, color * 255, 2);
char text[256];
sprintf(text, "%s %.1f%%", class_names[obj.label], obj.prob * 100);
int baseLine = 0;
cv::Size label_size = cv::getTextSize(text, cv::FONT_HERSHEY_SIMPLEX, 0.4, 1, &baseLine);
cv::Scalar txt_bk_color = color * 0.7 * 255;
int x = obj.rect.x;
int y = obj.rect.y + 1;
//int y = obj.rect.y - label_size.height - baseLine;
if (y > image.rows)
y = image.rows;
//if (x + label_size.width > image.cols)
//x = image.cols - label_size.width;
cv::rectangle(image, cv::Rect(cv::Point(x, y), cv::Size(label_size.width, label_size.height + baseLine)),
txt_bk_color, -1);
cv::putText(image, text, cv::Point(x, y + label_size.height),
cv::FONT_HERSHEY_SIMPLEX, 0.4, txt_color, 1);
}
int pos = f.find_last_of("/");
auto substr = f.substr(pos+1);
std::string savePath = "/mnt/Gpan/Mydata/pytorchPorject/yoloxNew/newYoloxCpp/result_pic/"+substr;
cv::imwrite(savePath, image);
// fprintf(stderr, "save vis file\n");
// cv::imshow("image", image);
// cv::waitKey(0);
}
void doInference(IExecutionContext& context, float* input, float* output, const int output_size, cv::Size input_shape,const char *INPUT_BLOB_NAME,const char *OUTPUT_BLOB_NAME) {
const ICudaEngine& engine = context.getEngine();
// Pointers to input and output device buffers to pass to engine.
// Engine requires exactly IEngine::getNbBindings() number of buffers.
assert(engine.getNbBindings() == 2);
void* buffers[2];
// In order to bind the buffers, we need to know the names of the input and output tensors.
// Note that indices are guaranteed to be less than IEngine::getNbBindings()
const int inputIndex = engine.getBindingIndex(INPUT_BLOB_NAME);
assert(engine.getBindingDataType(inputIndex) == nvinfer1::DataType::kFLOAT);
const int outputIndex = engine.getBindingIndex(OUTPUT_BLOB_NAME);
assert(engine.getBindingDataType(outputIndex) == nvinfer1::DataType::kFLOAT);
int mBatchSize = engine.getMaxBatchSize();
// Create GPU buffers on device
CHECK(cudaMalloc(&buffers[inputIndex], 3 * input_shape.height * input_shape.width * sizeof(float)));
CHECK(cudaMalloc(&buffers[outputIndex], output_size*sizeof(float)));
// Create stream
cudaStream_t stream;
CHECK(cudaStreamCreate(&stream));
// DMA input batch data to device, infer on the batch asynchronously, and DMA output back to host
CHECK(cudaMemcpyAsync(buffers[inputIndex], input, 3 * input_shape.height * input_shape.width * sizeof(float), cudaMemcpyHostToDevice, stream));
context.enqueue(1, buffers, stream, nullptr);
// context.enqueueV2( buffers, stream, nullptr);
CHECK(cudaMemcpyAsync(output, buffers[outputIndex], output_size * sizeof(float), cudaMemcpyDeviceToHost, stream));
cudaStreamSynchronize(stream);
// Release stream and buffers
cudaStreamDestroy(stream);
CHECK(cudaFree(buffers[inputIndex]));
CHECK(cudaFree(buffers[outputIndex]));
}
float getNorm2(float x,float y)
{
return sqrt(x*x+y*y);
}
cv::Mat getTransForm(cv::Mat &src_img, cv::Point2f order_rect[4]) // perspective transform
{
cv::Point2f w1=order_rect[0]-order_rect[1];
cv::Point2f w2=order_rect[2]-order_rect[3];
auto width1 = getNorm2(w1.x,w1.y);
auto width2 = getNorm2(w2.x,w2.y);
auto maxWidth = std::max(width1,width2);
cv::Point2f h1=order_rect[0]-order_rect[3];
cv::Point2f h2=order_rect[1]-order_rect[2];
auto height1 = getNorm2(h1.x,h1.y);
auto height2 = getNorm2(h2.x,h2.y);
auto maxHeight = std::max(height1,height2);
    // perspective transform
std::vector<cv::Point2f> pts_ori(4);
std::vector<cv::Point2f> pts_std(4);
pts_ori[0]=order_rect[0];
pts_ori[1]=order_rect[1];
pts_ori[2]=order_rect[2];
pts_ori[3]=order_rect[3];
pts_std[0]=cv::Point2f(0,0);
pts_std[1]=cv::Point2f(maxWidth,0);
pts_std[2]=cv::Point2f(maxWidth,maxHeight);
pts_std[3]=cv::Point2f(0,maxHeight);
cv::Mat M = cv::getPerspectiveTransform(pts_ori,pts_std);
cv:: Mat dstimg;
cv::warpPerspective(src_img,dstimg,M,cv::Size(maxWidth,maxHeight));
return dstimg;
}
cv::Mat get_split_merge(cv::Mat &img) // double-layer plate: split and merge side by side
{
cv::Rect upper_rect_area = cv::Rect(0,0,img.cols,int(5.0/12*img.rows));
cv::Rect lower_rect_area = cv::Rect(0,int(1.0/3*img.rows),img.cols,img.rows-int(1.0/3*img.rows));
cv::Mat img_upper = img(upper_rect_area);
cv::Mat img_lower =img(lower_rect_area);
cv::resize(img_upper,img_upper,img_lower.size());
cv::Mat out(img_lower.rows,img_lower.cols+img_upper.cols, CV_8UC3, cv::Scalar(114, 114, 114));
img_upper.copyTo(out(cv::Rect(0,0,img_upper.cols,img_upper.rows)));
img_lower.copyTo(out(cv::Rect(img_upper.cols,0,img_lower.cols,img_lower.rows)));
return out;
}
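// Geometry note: the upper band is roughly the top 5/12 of the crop and the lower
// band the bottom 2/3 (the two overlap slightly on purpose). The upper band is
// resized to the lower band's size and pasted to its left, so the two text rows
// become one single-line image that the recognizer can read like a normal plate.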
std::string decode_outputs(float *prob,int output_size)
{
std::string plate ="";
std::string pre_str ="#";
for (int i = 0; i<output_size; i++)
{
int index = int(prob[i]);
if (plate_string[index]!="#" && plate_string[index]!=pre_str)
plate+=plate_string[index];
pre_str = plate_string[index];
}
return plate;
}
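// Decoding note: this is a greedy CTC-style collapse, where '#' in plate_string
// acts as the blank symbol. Hypothetical index sequence {5,5,0,5,12} with the
// blank at index 0: the repeated 5 is merged, the blank resets pre_str so the
// next 5 is kept, then 12 is appended -- three characters in total.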
std::string decode_outputs_pingyin(float *prob,int output_size) // pinyin (Latin-letter) version
{
std::string plate ="";
std::string pre_str ="#";
for (int i = 0; i<output_size; i++)
{
int index = int(prob[i]);
if (plate_string_yinwen[index]!="#" && plate_string_yinwen[index]!=pre_str)
plate+=plate_string_yinwen[index];
pre_str = plate_string_yinwen[index];
}
return plate;
}
void doInference_cu(IExecutionContext& context, cudaStream_t& stream, void **buffers, float* output, int batchSize,int OUTPUT_SIZE) {
// infer on the batch asynchronously, and DMA output back to host
context.enqueue(batchSize, buffers, stream, nullptr);
CHECK(cudaMemcpyAsync(output, buffers[1], batchSize * OUTPUT_SIZE * sizeof(float), cudaMemcpyDeviceToHost, stream));
cudaStreamSynchronize(stream);
}
int main(int argc, char** argv) {
cudaSetDevice(DEVICE);
char *trtModelStreamDet{nullptr};
char *trtModelStreamRec{nullptr};
size_t size{0};
size_t size_rec{0};
// argv[1]="/mnt/Gu/xiaolei/cplusplus/trt_project/chinese_plate_recoginition/build/plate_detect.trt";
// argv[2]="/mnt/Gu/xiaolei/cplusplus/trt_project/chinese_plate_recoginition/build/plate_rec.trt";
// argv[3]="/mnt/Gu/xiaolei/cplusplus/trt_project/chinese_plate_recoginition/test_imgs/single_blue.jpg";
// argv[4]="output.jpg";
const std::string engine_file_path {argv[1]};
std::ifstream file(engine_file_path, std::ios::binary);
if (file.good()) {
file.seekg(0, file.end);
size = file.tellg();
file.seekg(0, file.beg);
trtModelStreamDet = new char[size];
assert(trtModelStreamDet);
file.read(trtModelStreamDet, size);
file.close();
}
const std::string engine_file_path_rec {argv[2]};
std::ifstream file_rec(engine_file_path_rec, std::ios::binary);
if (file_rec.good()) {
file_rec.seekg(0, file_rec.end);
size_rec = file_rec.tellg();
file_rec.seekg(0, file_rec.beg);
trtModelStreamRec = new char[size_rec];
assert(trtModelStreamRec);
file_rec.read(trtModelStreamRec, size_rec);
file_rec.close();
}
// initialize TensorRT for the detection model
IRuntime* runtime_det = createInferRuntime(gLogger);
assert(runtime_det != nullptr);
ICudaEngine* engine_det = runtime_det->deserializeCudaEngine(trtModelStreamDet, size);
assert(engine_det != nullptr);
IExecutionContext* context_det = engine_det->createExecutionContext();
assert(context_det != nullptr);
delete[] trtModelStreamDet;
// initialize TensorRT for the recognition model
IRuntime* runtime_rec = createInferRuntime(gLogger);
assert(runtime_rec!= nullptr);
ICudaEngine* engine_rec = runtime_rec->deserializeCudaEngine(trtModelStreamRec, size_rec);
assert(engine_rec != nullptr);
IExecutionContext* context_rec = engine_rec->createExecutionContext();
assert(context_rec != nullptr);
delete[] trtModelStreamRec;
float *buffers[2];
const int inputIndex = engine_det->getBindingIndex(INPUT_BLOB_NAME);
const int outputIndex = engine_det->getBindingIndex(OUTPUT_BLOB_NAME);
assert(inputIndex == 0);
assert(outputIndex == 1);
// Create GPU buffers on device
auto out_dims = engine_det->getBindingDimensions(1);
auto output_size = 1;
int OUTPUT_CANDIDATES = out_dims.d[1];
for(int j=0;j<out_dims.nbDims;j++) {
output_size *= out_dims.d[j];
}
CHECK(cudaMalloc((void**)&buffers[inputIndex], 3 * INPUT_H * INPUT_W * sizeof(float)));
CHECK(cudaMalloc((void**)&buffers[outputIndex], output_size * sizeof(float)));
// Create stream
cudaStream_t stream;
CHECK(cudaStreamCreate(&stream));
uint8_t* img_host = nullptr;
uint8_t* img_device = nullptr;
// prepare input data cache in pinned memory
CHECK(cudaMallocHost((void**)&img_host, MAX_IMAGE_INPUT_SIZE_THRESH * 3));
// prepare input data cache in device memory
CHECK(cudaMalloc((void**)&img_device, MAX_IMAGE_INPUT_SIZE_THRESH * 3));
auto out_dims_rec = engine_rec->getBindingDimensions(1);
auto output_size_rec = 1;
int OUTPUT_CANDIDATES_REC = out_dims_rec.d[1];
for(int j=0;j<out_dims_rec.nbDims;j++) {
output_size_rec *= out_dims_rec.d[j];
}
static float* prob = new float[output_size];
static float* prob_rec = new float[output_size_rec];
// recognition model parameters
int plate_rec_input_w = 168;
int plate_rec_input_h = 48;
float* blob_rec=new float[plate_rec_input_w*plate_rec_input_h*3];
float mean_value=0.588;
float std_value =0.193;
const char* plate_rec_input_name = "images"; // ONNX input tensor name
const char* plate_rec_out_name= "output"; // ONNX output tensor name
// end of recognition model parameters
cv::Point2f rect[4];
cv::Point2f order_rect[4];
cv::Point point[1][4];
// std::string imgPath ="/mnt/Gpan/Mydata/pytorchPorject/Chinese_license_plate_detection_recognition/imgs";
std::string input_image_path=argv[3];
std::string imgPath=argv[3];
std::vector<std::string> imagList;
std::vector<std::string>fileType{"jpg","png"};
readFileList(const_cast<char *>(imgPath.c_str()),imagList,fileType);
double sumTime = 0;
int index = 0;
for (auto &input_image_path:imagList)
{
cv::Mat img = cv::imread(input_image_path);
if (img.empty()) continue; // skip files OpenCV cannot decode
double begin_time = cv::getTickCount();
float *buffer_idx = (float*)buffers[inputIndex];
size_t size_image = img.cols * img.rows * 3;
size_t size_image_dst = INPUT_H * INPUT_W * 3;
memcpy(img_host, img.data, size_image); // caller must ensure size_image <= MAX_IMAGE_INPUT_SIZE_THRESH * 3 (pinned buffer size)
CHECK(cudaMemcpyAsync(img_device, img_host, size_image, cudaMemcpyHostToDevice, stream));
preprocess_kernel_img(img_device, img.cols, img.rows, buffer_idx, INPUT_W, INPUT_H, stream);
double time_pre = cv::getTickCount();
double time_pre_=(time_pre-begin_time)/cv::getTickFrequency()*1000;
// std::cout<<"preprocessing time is "<<time_pre_<<" ms"<<std::endl;
doInference_cu(*context_det,stream, (void**)buffers,prob,1,output_size);
float r = std::min(INPUT_W / (img.cols*1.0), INPUT_H / (img.rows*1.0));
// r = std::min(r, 1.0f);
int unpad_w = r * img.cols;
int unpad_h = r * img.rows;
int left = (INPUT_W-unpad_w)/2;
int top = (INPUT_H-unpad_h)/2;
// if (index)
// {
// double use_time =(cv::getTickCount()-begin_time)/cv::getTickFrequency()*1000;
// sumTime+=use_time;
// }
int img_w = img.cols;
int img_h = img.rows;
// int top=0;
// int left= 0;
// cv::Mat pr_img = static_resize(img,top,left);
// float* blob_detect;
// blob_detect = blobFromImage(pr_img);
float scale = std::min(INPUT_W / (img.cols*1.0), INPUT_H / (img.rows*1.0));
//run inference
// auto start = cv::getTickCount();
// doInference(*context_det, blob_detect, prob, output_size, pr_img.size(),INPUT_BLOB_NAME,OUTPUT_BLOB_NAME);
// auto end = cv::getTickCount();
// if (index)
// sumTime+=double((end-begin_time)/cv::getTickFrequency()*1000);
// std::cout << double((end-start)/cv::getTickFrequency()*1000) << "ms" << std::endl;
std::vector<Object> objects;
decode_outputs(prob, objects, scale, img_w, img_h,OUTPUT_CANDIDATES,top,left);
// std::cout << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms" << std::endl;
// std::cout << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms" << std::endl;
std::cout<<input_image_path<<" ";
for (int i = 0; i<objects.size(); i++)
{
// cv::rectangle(img, objects[i].rect, cv::Scalar(0,255,0), 2);
for (int j= 0; j<4; j++)
{
// cv::Scalar color = cv::Scalar(color_list[j][0], color_list[j][1], color_list[j][2]);
// cv::circle(img,cv::Point(objects[i].landmarks[2*j], objects[i].landmarks[2*j+1]),5,color,-1);
order_rect[j]=cv::Point(objects[i].landmarks[2*j],objects[i].landmarks[2*j+1]);
}
cv::Mat roiImg = getTransForm(img,order_rect); // perspective transform based on the four landmarks
int label = objects[i].label;
if (label) // double-layer plate: split and stitch it into a single line
roiImg=get_split_merge(roiImg);
// cv::imwrite("roi.jpg",roiImg);
cv::resize(roiImg,roiImg,cv::Size(plate_rec_input_w,plate_rec_input_h));
cv::Mat pr_img =roiImg;
// std::cout << "blob image" << std::endl;
auto rec_b = cv::getTickCount();
blobFromImage_plate(pr_img,mean_value,std_value,blob_rec);
auto rec_e = cv::getTickCount();
auto rec_gap = (rec_e-rec_b)/cv::getTickFrequency()*1000;
doInference(*context_rec, blob_rec, prob_rec, output_size_rec, pr_img.size(),plate_rec_input_name,plate_rec_out_name);
auto plate_number = decode_outputs(prob_rec,output_size_rec);
auto plate_number_pinyin= decode_outputs_pingyin(prob_rec,output_size_rec);
cv::Point origin;
origin.x = objects[i].rect.x;
origin.y = objects[i].rect.y;
cv::putText(img, plate_number_pinyin, origin, cv::FONT_HERSHEY_COMPLEX, 1, cv::Scalar(0, 255, 0), 2, 8, 0);
std::cout<<" "<<plate_number;
}
double end_time = cv::getTickCount();
auto time_gap = (end_time-begin_time)/cv::getTickFrequency()*1000;
std::cout<<" time_gap: "<<time_gap<<"ms ";
if (index)
{
// double use_time =(cv::getTickCount()-begin_time)/cv::getTickFrequency()*1000;
sumTime+=time_gap;
}
std::cout<<std::endl;
// delete [] blob_detect;
index+=1;
}
// cv::imwrite("out.jpg",img);
// destroy the engine
std::cout<<"averageTime:"<<(sumTime/(imagList.size()-1))<<"ms"<<std::endl;
context_det->destroy();
engine_det->destroy();
runtime_det->destroy();
context_rec->destroy();
engine_rec->destroy();
runtime_rec->destroy();
delete [] blob_rec;
cudaStreamDestroy(stream);
CHECK(cudaFree(img_device));
CHECK(cudaFreeHost(img_host));
CHECK(cudaFree(buffers[inputIndex]));
CHECK(cudaFree(buffers[outputIndex]));
return 0;
}
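// Usage sketch (binary name assumed; argv layout follows the code above):
// ./detect_rec_plate <plate_detect.trt> <plate_rec.trt> <image_dir>
// argv[1]: detection engine, argv[2]: recognition engine, argv[3]: directory of
// jpg/png images scanned recursively by readFileList(). The reported averageTime
// excludes the first image, which serves as a GPU warm-up run.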
|
2301_81045437/yolov7_plate
|
tensorrt/detect_rec_plate.cpp
|
C++
|
unknown
| 31,057
|
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TENSORRT_LOGGING_H
#define TENSORRT_LOGGING_H
#include "NvInferRuntimeCommon.h"
#include <cassert>
#include <ctime>
#include <iomanip>
#include <iostream>
#include <ostream>
#include <sstream>
#include <string>
using Severity = nvinfer1::ILogger::Severity;
class LogStreamConsumerBuffer : public std::stringbuf
{
public:
LogStreamConsumerBuffer(std::ostream& stream, const std::string& prefix, bool shouldLog)
: mOutput(stream)
, mPrefix(prefix)
, mShouldLog(shouldLog)
{
}
LogStreamConsumerBuffer(LogStreamConsumerBuffer&& other)
: mOutput(other.mOutput)
, mPrefix(other.mPrefix)
, mShouldLog(other.mShouldLog)
{
}
~LogStreamConsumerBuffer()
{
// std::streambuf::pbase() gives a pointer to the beginning of the buffered part of the output sequence
// std::streambuf::pptr() gives a pointer to the current position of the output sequence
// if the pointer to the beginning is not equal to the pointer to the current position,
// call putOutput() to log the output to the stream
if (pbase() != pptr())
{
putOutput();
}
}
// synchronizes the stream buffer and returns 0 on success
// synchronizing the stream buffer consists of inserting the buffer contents into the stream,
// resetting the buffer and flushing the stream
virtual int sync()
{
putOutput();
return 0;
}
void putOutput()
{
if (mShouldLog)
{
// prepend timestamp
std::time_t timestamp = std::time(nullptr);
tm* tm_local = std::localtime(&timestamp);
std::cout << "[";
std::cout << std::setw(2) << std::setfill('0') << 1 + tm_local->tm_mon << "/";
std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_mday << "/";
std::cout << std::setw(4) << std::setfill('0') << 1900 + tm_local->tm_year << "-";
std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_hour << ":";
std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_min << ":";
std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_sec << "] ";
// std::stringbuf::str() gets the string contents of the buffer
// insert the buffer contents pre-appended by the appropriate prefix into the stream
mOutput << mPrefix << str();
// set the buffer to empty
str("");
// flush the stream
mOutput.flush();
}
}
void setShouldLog(bool shouldLog)
{
mShouldLog = shouldLog;
}
private:
std::ostream& mOutput;
std::string mPrefix;
bool mShouldLog;
};
//!
//! \class LogStreamConsumerBase
//! \brief Convenience object used to initialize LogStreamConsumerBuffer before std::ostream in LogStreamConsumer
//!
class LogStreamConsumerBase
{
public:
LogStreamConsumerBase(std::ostream& stream, const std::string& prefix, bool shouldLog)
: mBuffer(stream, prefix, shouldLog)
{
}
protected:
LogStreamConsumerBuffer mBuffer;
};
//!
//! \class LogStreamConsumer
//! \brief Convenience object used to facilitate use of C++ stream syntax when logging messages.
//! Order of base classes is LogStreamConsumerBase and then std::ostream.
//! This is because the LogStreamConsumerBase class is used to initialize the LogStreamConsumerBuffer member field
//! in LogStreamConsumer and then the address of the buffer is passed to std::ostream.
//! This is necessary to prevent the address of an uninitialized buffer from being passed to std::ostream.
//! Please do not change the order of the parent classes.
//!
class LogStreamConsumer : protected LogStreamConsumerBase, public std::ostream
{
public:
//! \brief Creates a LogStreamConsumer which logs messages with level severity.
//! Reportable severity determines if the messages are severe enough to be logged.
LogStreamConsumer(Severity reportableSeverity, Severity severity)
: LogStreamConsumerBase(severityOstream(severity), severityPrefix(severity), severity <= reportableSeverity)
, std::ostream(&mBuffer) // links the stream buffer with the stream
, mShouldLog(severity <= reportableSeverity)
, mSeverity(severity)
{
}
LogStreamConsumer(LogStreamConsumer&& other)
: LogStreamConsumerBase(severityOstream(other.mSeverity), severityPrefix(other.mSeverity), other.mShouldLog)
, std::ostream(&mBuffer) // links the stream buffer with the stream
, mShouldLog(other.mShouldLog)
, mSeverity(other.mSeverity)
{
}
void setReportableSeverity(Severity reportableSeverity)
{
mShouldLog = mSeverity <= reportableSeverity;
mBuffer.setShouldLog(mShouldLog);
}
private:
static std::ostream& severityOstream(Severity severity)
{
return severity >= Severity::kINFO ? std::cout : std::cerr;
}
static std::string severityPrefix(Severity severity)
{
switch (severity)
{
case Severity::kINTERNAL_ERROR: return "[F] ";
case Severity::kERROR: return "[E] ";
case Severity::kWARNING: return "[W] ";
case Severity::kINFO: return "[I] ";
case Severity::kVERBOSE: return "[V] ";
default: assert(0); return "";
}
}
bool mShouldLog;
Severity mSeverity;
};
//! \class Logger
//!
//! \brief Class which manages logging of TensorRT tools and samples
//!
//! \details This class provides a common interface for TensorRT tools and samples to log information to the console,
//! and supports logging two types of messages:
//!
//! - Debugging messages with an associated severity (info, warning, error, or internal error/fatal)
//! - Test pass/fail messages
//!
//! The advantage of having all samples use this class for logging as opposed to emitting directly to stdout/stderr is
//! that the logic for controlling the verbosity and formatting of sample output is centralized in one location.
//!
//! In the future, this class could be extended to support dumping test results to a file in some standard format
//! (for example, JUnit XML), and providing additional metadata (e.g. timing the duration of a test run).
//!
//! TODO: For backwards compatibility with existing samples, this class inherits directly from the nvinfer1::ILogger
//! interface, which is problematic since there isn't a clean separation between messages coming from the TensorRT
//! library and messages coming from the sample.
//!
//! In the future (once all samples are updated to use Logger::getTRTLogger() to access the ILogger) we can refactor the
//! class to eliminate the inheritance and instead make the nvinfer1::ILogger implementation a member of the Logger
//! object.
class Logger : public nvinfer1::ILogger
{
public:
Logger(Severity severity = Severity::kWARNING)
: mReportableSeverity(severity)
{
}
//!
//! \enum TestResult
//! \brief Represents the state of a given test
//!
enum class TestResult
{
kRUNNING, //!< The test is running
kPASSED, //!< The test passed
kFAILED, //!< The test failed
kWAIVED //!< The test was waived
};
//!
//! \brief Forward-compatible method for retrieving the nvinfer::ILogger associated with this Logger
//! \return The nvinfer1::ILogger associated with this Logger
//!
//! TODO Once all samples are updated to use this method to register the logger with TensorRT,
//! we can eliminate the inheritance of Logger from ILogger
//!
nvinfer1::ILogger& getTRTLogger()
{
return *this;
}
//!
//! \brief Implementation of the nvinfer1::ILogger::log() virtual method
//!
//! Note samples should not be calling this function directly; it will eventually go away once we eliminate the
//! inheritance from nvinfer1::ILogger
//!
// void log(Severity severity, const char* msg) override
void log(Severity severity, nvinfer1::AsciiChar const* msg) noexcept
{
LogStreamConsumer(mReportableSeverity, severity) << "[TRT] " << std::string(msg) << std::endl;
}
//!
//! \brief Method for controlling the verbosity of logging output
//!
//! \param severity The logger will only emit messages that have severity of this level or higher.
//!
void setReportableSeverity(Severity severity)
{
mReportableSeverity = severity;
}
//!
//! \brief Opaque handle that holds logging information for a particular test
//!
//! This object is an opaque handle to information used by the Logger to print test results.
//! The sample must call Logger::defineTest() in order to obtain a TestAtom that can be used
//! with Logger::reportTest{Start,End}().
//!
class TestAtom
{
public:
TestAtom(TestAtom&&) = default;
private:
friend class Logger;
TestAtom(bool started, const std::string& name, const std::string& cmdline)
: mStarted(started)
, mName(name)
, mCmdline(cmdline)
{
}
bool mStarted;
std::string mName;
std::string mCmdline;
};
//!
//! \brief Define a test for logging
//!
//! \param[in] name The name of the test. This should be a string starting with
//! "TensorRT" and containing dot-separated strings containing
//! the characters [A-Za-z0-9_].
//! For example, "TensorRT.sample_googlenet"
//! \param[in] cmdline The command line used to reproduce the test
//
//! \return a TestAtom that can be used in Logger::reportTest{Start,End}().
//!
static TestAtom defineTest(const std::string& name, const std::string& cmdline)
{
return TestAtom(false, name, cmdline);
}
//!
//! \brief A convenience overloaded version of defineTest() that accepts an array of command-line arguments
//! as input
//!
//! \param[in] name The name of the test
//! \param[in] argc The number of command-line arguments
//! \param[in] argv The array of command-line arguments (given as C strings)
//!
//! \return a TestAtom that can be used in Logger::reportTest{Start,End}().
static TestAtom defineTest(const std::string& name, int argc, char const* const* argv)
{
auto cmdline = genCmdlineString(argc, argv);
return defineTest(name, cmdline);
}
//!
//! \brief Report that a test has started.
//!
//! \pre reportTestStart() has not been called yet for the given testAtom
//!
//! \param[in] testAtom The handle to the test that has started
//!
static void reportTestStart(TestAtom& testAtom)
{
reportTestResult(testAtom, TestResult::kRUNNING);
assert(!testAtom.mStarted);
testAtom.mStarted = true;
}
//!
//! \brief Report that a test has ended.
//!
//! \pre reportTestStart() has been called for the given testAtom
//!
//! \param[in] testAtom The handle to the test that has ended
//! \param[in] result The result of the test. Should be one of TestResult::kPASSED,
//! TestResult::kFAILED, TestResult::kWAIVED
//!
static void reportTestEnd(const TestAtom& testAtom, TestResult result)
{
assert(result != TestResult::kRUNNING);
assert(testAtom.mStarted);
reportTestResult(testAtom, result);
}
static int reportPass(const TestAtom& testAtom)
{
reportTestEnd(testAtom, TestResult::kPASSED);
return EXIT_SUCCESS;
}
static int reportFail(const TestAtom& testAtom)
{
reportTestEnd(testAtom, TestResult::kFAILED);
return EXIT_FAILURE;
}
static int reportWaive(const TestAtom& testAtom)
{
reportTestEnd(testAtom, TestResult::kWAIVED);
return EXIT_SUCCESS;
}
static int reportTest(const TestAtom& testAtom, bool pass)
{
return pass ? reportPass(testAtom) : reportFail(testAtom);
}
Severity getReportableSeverity() const
{
return mReportableSeverity;
}
private:
//!
//! \brief returns an appropriate string for prefixing a log message with the given severity
//!
static const char* severityPrefix(Severity severity)
{
switch (severity)
{
case Severity::kINTERNAL_ERROR: return "[F] ";
case Severity::kERROR: return "[E] ";
case Severity::kWARNING: return "[W] ";
case Severity::kINFO: return "[I] ";
case Severity::kVERBOSE: return "[V] ";
default: assert(0); return "";
}
}
//!
//! \brief returns an appropriate string for prefixing a test result message with the given result
//!
static const char* testResultString(TestResult result)
{
switch (result)
{
case TestResult::kRUNNING: return "RUNNING";
case TestResult::kPASSED: return "PASSED";
case TestResult::kFAILED: return "FAILED";
case TestResult::kWAIVED: return "WAIVED";
default: assert(0); return "";
}
}
//!
//! \brief returns an appropriate output stream (cout or cerr) to use with the given severity
//!
static std::ostream& severityOstream(Severity severity)
{
return severity >= Severity::kINFO ? std::cout : std::cerr;
}
//!
//! \brief method that implements logging test results
//!
static void reportTestResult(const TestAtom& testAtom, TestResult result)
{
severityOstream(Severity::kINFO) << "&&&& " << testResultString(result) << " " << testAtom.mName << " # "
<< testAtom.mCmdline << std::endl;
}
//!
//! \brief generate a command line string from the given (argc, argv) values
//!
static std::string genCmdlineString(int argc, char const* const* argv)
{
std::stringstream ss;
for (int i = 0; i < argc; i++)
{
if (i > 0)
ss << " ";
ss << argv[i];
}
return ss.str();
}
Severity mReportableSeverity;
};
namespace
{
//!
//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kVERBOSE
//!
//! Example usage:
//!
//! LOG_VERBOSE(logger) << "hello world" << std::endl;
//!
inline LogStreamConsumer LOG_VERBOSE(const Logger& logger)
{
return LogStreamConsumer(logger.getReportableSeverity(), Severity::kVERBOSE);
}
//!
//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kINFO
//!
//! Example usage:
//!
//! LOG_INFO(logger) << "hello world" << std::endl;
//!
inline LogStreamConsumer LOG_INFO(const Logger& logger)
{
return LogStreamConsumer(logger.getReportableSeverity(), Severity::kINFO);
}
//!
//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kWARNING
//!
//! Example usage:
//!
//! LOG_WARN(logger) << "hello world" << std::endl;
//!
inline LogStreamConsumer LOG_WARN(const Logger& logger)
{
return LogStreamConsumer(logger.getReportableSeverity(), Severity::kWARNING);
}
//!
//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kERROR
//!
//! Example usage:
//!
//! LOG_ERROR(logger) << "hello world" << std::endl;
//!
inline LogStreamConsumer LOG_ERROR(const Logger& logger)
{
return LogStreamConsumer(logger.getReportableSeverity(), Severity::kERROR);
}
//!
//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kINTERNAL_ERROR
// ("fatal" severity)
//!
//! Example usage:
//!
//! LOG_FATAL(logger) << "hello world" << std::endl;
//!
inline LogStreamConsumer LOG_FATAL(const Logger& logger)
{
return LogStreamConsumer(logger.getReportableSeverity(), Severity::kINTERNAL_ERROR);
}
} // anonymous namespace
#endif // TENSORRT_LOGGING_H
|
2301_81045437/yolov7_plate
|
tensorrt/include/logging.h
|
C++
|
unknown
| 16,627
|
#ifndef _UTILS_H_
#define _UTILS_H_
#include <vector>
#include <string>
#include <dirent.h>
#include <sys/types.h>
#include <iostream>
#include <string.h>
#include <sys/stat.h>
#include <opencv2/opencv.hpp>
struct boundingBox
{
float x;
float y;
float w;
float h;
int label;
float score;
};
std::string getHouZhui(std::string fileName);
int readFileList(char *basePath,std::vector<std::string> &fileList,std::vector<std::string> fileType);
void draw_rect(const cv::Mat& image, const std::vector<boundingBox>bboxes,const char* class_names[]);
bool cmpBox(boundingBox b1, boundingBox b2);
float getIou(boundingBox b1,boundingBox b2) ;
void myNms(std::vector<boundingBox>&bboxes,float score);
const float color_list1[80][3] =
{
{0.000, 0.447, 0.741},
{0.850, 0.325, 0.098},
{0.929, 0.694, 0.125},
{0.494, 0.184, 0.556},
{0.466, 0.674, 0.188},
{0.301, 0.745, 0.933},
{0.635, 0.078, 0.184},
{0.300, 0.300, 0.300},
{0.600, 0.600, 0.600},
{1.000, 0.000, 0.000},
{1.000, 0.500, 0.000},
{0.749, 0.749, 0.000},
{0.000, 1.000, 0.000},
{0.000, 0.000, 1.000},
{0.667, 0.000, 1.000},
{0.333, 0.333, 0.000},
{0.333, 0.667, 0.000},
{0.333, 1.000, 0.000},
{0.667, 0.333, 0.000},
{0.667, 0.667, 0.000},
{0.667, 1.000, 0.000},
{1.000, 0.333, 0.000},
{1.000, 0.667, 0.000},
{1.000, 1.000, 0.000},
{0.000, 0.333, 0.500},
{0.000, 0.667, 0.500},
{0.000, 1.000, 0.500},
{0.333, 0.000, 0.500},
{0.333, 0.333, 0.500},
{0.333, 0.667, 0.500},
{0.333, 1.000, 0.500},
{0.667, 0.000, 0.500},
{0.667, 0.333, 0.500},
{0.667, 0.667, 0.500},
{0.667, 1.000, 0.500},
{1.000, 0.000, 0.500},
{1.000, 0.333, 0.500},
{1.000, 0.667, 0.500},
{1.000, 1.000, 0.500},
{0.000, 0.333, 1.000},
{0.000, 0.667, 1.000},
{0.000, 1.000, 1.000},
{0.333, 0.000, 1.000},
{0.333, 0.333, 1.000},
{0.333, 0.667, 1.000},
{0.333, 1.000, 1.000},
{0.667, 0.000, 1.000},
{0.667, 0.333, 1.000},
{0.667, 0.667, 1.000},
{0.667, 1.000, 1.000},
{1.000, 0.000, 1.000},
{1.000, 0.333, 1.000},
{1.000, 0.667, 1.000},
{0.333, 0.000, 0.000},
{0.500, 0.000, 0.000},
{0.667, 0.000, 0.000},
{0.833, 0.000, 0.000},
{1.000, 0.000, 0.000},
{0.000, 0.167, 0.000},
{0.000, 0.333, 0.000},
{0.000, 0.500, 0.000},
{0.000, 0.667, 0.000},
{0.000, 0.833, 0.000},
{0.000, 1.000, 0.000},
{0.000, 0.000, 0.167},
{0.000, 0.000, 0.333},
{0.000, 0.000, 0.500},
{0.000, 0.000, 0.667},
{0.000, 0.000, 0.833},
{0.000, 0.000, 1.000},
{0.000, 0.000, 0.000},
{0.143, 0.143, 0.143},
{0.286, 0.286, 0.286},
{0.429, 0.429, 0.429},
{0.571, 0.571, 0.571},
{0.714, 0.714, 0.714},
{0.857, 0.857, 0.857},
{0.000, 0.447, 0.741},
{0.314, 0.717, 0.741},
{0.50, 0.5, 0}
};
#endif
|
2301_81045437/yolov7_plate
|
tensorrt/include/utils.hpp
|
C++
|
unknown
| 2,958
|
cmake_minimum_required(VERSION 3.10)
project(onnx2trt)
add_executable(onnx2trt onnx2trt.cpp)
target_link_libraries(onnx2trt nvinfer)
target_link_libraries(onnx2trt cudart)
target_link_libraries(onnx2trt nvonnxparser)
|
2301_81045437/yolov7_plate
|
tensorrt/onnx2trt/CMakeLists.txt
|
CMake
|
unknown
| 218
|
#include <fstream>
#include <iostream>
#include <sstream>
#include <numeric>
// #include <chrono>
#include <vector>
#include <opencv2/opencv.hpp>
// #include <dirent.h>
#include "NvInfer.h"
#include "NvOnnxParser.h"
// #include "NvInferRuntime.h"
#include "logging.h"
#include "cuda_runtime_api.h"
using namespace nvinfer1;
using namespace std;
static Logger gLogger;
const char* INPUT_BLOB_NAME = "input";
const char* OUTPUT_BLOB_NAME = "output";
void saveToTrtModel(const char * TrtSaveFileName,IHostMemory*trtModelStream)
{
std::ofstream out(TrtSaveFileName, std::ios::binary);
if (!out.is_open())
{
std::cout << "Failed to open output file!" << std::endl;
return;
}
out.write(reinterpret_cast<const char*>(trtModelStream->data()), trtModelStream->size());
out.close();
}
void onnxToTRTModel(const std::string& modelFile,unsigned int maxBatchSize,IHostMemory*& trtModelStream,const char * TrtSaveFileName)
{
int verbosity = (int) nvinfer1::ILogger::Severity::kWARNING;
// create the builder
IBuilder* builder = createInferBuilder(gLogger); // create the builder (a pointer to an IBuilder object)
IBuilderConfig *config = builder->createBuilderConfig();
const auto explicitBatch = 1U << static_cast<uint32_t>(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH); // required for ONNX parsing, otherwise TensorRT reports an error
nvinfer1::INetworkDefinition* network = builder->createNetworkV2(explicitBatch); /* creates an INetworkDefinition object; the network pointer refers to it */
auto parser = nvonnxparser::createParser(*network, gLogger.getTRTLogger()); // create the parser
//Optional - uncomment below lines to view network layer information
//config->setPrintLayerInfo(true);
//parser->reportParsingInfo();
if (!parser->parseFromFile(modelFile.c_str(), verbosity)) // parse the ONNX file and populate the network
{
string msg("failed to parse onnx file");
gLogger.log(nvinfer1::ILogger::Severity::kERROR, msg.c_str());
exit(EXIT_FAILURE);
}
// Build the engine
builder->setMaxBatchSize(maxBatchSize);
config->setMaxWorkspaceSize(1 << 30);
// builder->setMaxWorkspaceSize(1 << 30);
#ifdef USE_FP16
config->setFlag(BuilderFlag::kFP16);
#endif
// samplesCommon::enableDLA(builder, gUseDLACore);
// when the engine is built, TensorRT makes its own copy of the network weights
// ICudaEngine* engine = builder->buildCudaEngine(*network); // legacy API that creates the ICudaEngine
ICudaEngine *engine = builder->buildEngineWithConfig(*network,*config);
assert(engine);
// we can destroy the parser
parser->destroy();
// serialize the engine,
// then close everything down
trtModelStream = engine->serialize(); // serialize the engine so it can be saved to a file
engine->destroy();
network->destroy();
builder->destroy();
config->destroy();
saveToTrtModel(TrtSaveFileName,trtModelStream);
}
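// Usage sketch (hypothetical file names): serialize a fixed-batch engine once,
// then deserialize the .trt file at inference time instead of re-parsing ONNX.
// IHostMemory* stream{nullptr};
// onnxToTRTModel("plate_rec.onnx", 1, stream, "plate_rec.trt");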
void onnxToTRTModelDynamicBatch(const std::string& modelFile, unsigned int maxBatchSize, IHostMemory*& trtModelStream,const char * TrtSaveFileName,int input_h,int input_w) // output buffer for the TensorRT model, dynamic batch
{
int verbosity = (int) nvinfer1::ILogger::Severity::kWARNING;
// create the builder
IBuilder* builder = createInferBuilder(gLogger); // create the builder (a pointer to an IBuilder object)
IBuilderConfig *config = builder->createBuilderConfig();
auto profile = builder->createOptimizationProfile();
const auto explicitBatch = 1U << static_cast<uint32_t>(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH); // required for ONNX parsing, otherwise TensorRT reports an error
nvinfer1::INetworkDefinition* network = builder->createNetworkV2(explicitBatch); /* creates an INetworkDefinition object; the network pointer refers to it */
Dims dims = Dims4{1, 3, input_h, input_w};
profile->setDimensions(INPUT_BLOB_NAME,
OptProfileSelector::kMIN, Dims4{1, dims.d[1], dims.d[2], dims.d[3]});
profile->setDimensions(INPUT_BLOB_NAME,
OptProfileSelector::kOPT, Dims4{maxBatchSize, dims.d[1], dims.d[2], dims.d[3]});
profile->setDimensions(INPUT_BLOB_NAME,
OptProfileSelector::kMAX, Dims4{maxBatchSize, dims.d[1], dims.d[2], dims.d[3]});
config->addOptimizationProfile(profile);
auto parser = nvonnxparser::createParser(*network, gLogger.getTRTLogger()); // create the parser
//Optional - uncomment below lines to view network layer information
//config->setPrintLayerInfo(true);
//parser->reportParsingInfo();
if (!parser->parseFromFile(modelFile.c_str(), verbosity)) // parse the ONNX file and populate the network
{
string msg("failed to parse onnx file");
gLogger.log(nvinfer1::ILogger::Severity::kERROR, msg.c_str());
exit(EXIT_FAILURE);
}
// Build the engine
// builder->setMaxBatchSize(maxBatchSize);
config->setMaxWorkspaceSize(1 << 30);
// builder->setMaxWorkspaceSize(1 << 30);
#ifdef USE_FP16
config->setFlag(BuilderFlag::kFP16);
#endif
// samplesCommon::enableDLA(builder, gUseDLACore);
// when the engine is built, TensorRT makes its own copy of the network weights
// ICudaEngine* engine = builder->buildCudaEngine(*network); // legacy API that creates the ICudaEngine
ICudaEngine *engine = builder->buildEngineWithConfig(*network,*config);
assert(engine);
// we can destroy the parser
parser->destroy();
// serialize the engine,
// then close everything down
trtModelStream = engine->serialize(); // serialize the engine so it can be saved to a file
engine->destroy();
network->destroy();
builder->destroy();
config->destroy();
saveToTrtModel(TrtSaveFileName,trtModelStream);
}
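// Note for dynamic-batch engines: before enqueueing, the execution context must
// be given the actual input shape or inference will fail. A minimal sketch,
// assuming the input is binding 0 as in the profile above:
// context->setBindingDimensions(0, Dims4{actualBatch, 3, input_h, input_w});
// context->enqueueV2(buffers, stream, nullptr);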
// void readTrtModel(const char * Trtmodel) // read a serialized TRT engine from file
// {
// size_t size{ 0 };
// std::ifstream file(Trtmodel, std::ios::binary);
// if (file.good()) {
// file.seekg(0, file.end);
// size = file.tellg();
// file.seekg(0, file.beg);
// _trtModelStream = new char[size];
// assert(_trtModelStream);
// file.read(_trtModelStream, size);
// file.close();
// }
// _trtModelStreamSize = size;
// _runtime = createInferRuntime(gLogger);
// _engine1 = _runtime->deserializeCudaEngine(_trtModelStream, _trtModelStreamSize);
// //cudaSetDevice(0);
// context = _engine1->createExecutionContext();
// }
int main(int argc, char** argv)
{
if (argc < 4)
{
std::cerr << "usage: onnx2trt <model.onnx> <saved.trt> <maxBatchSize>" << std::endl;
return -1;
}
IHostMemory* trtModelStream{nullptr};
int batchSize = atoi(argv[3]);
// int input_h = atoi(argv[4]);
// int input_w=atoi(argv[5]);
onnxToTRTModel(argv[1],batchSize,trtModelStream,argv[2]);
std::cout<<"convert success!"<<std::endl;
return 0;
}
|
2301_81045437/yolov7_plate
|
tensorrt/onnx2trt/onnx2trt.cpp
|
C++
|
unknown
| 7,277
|
#include <fstream>
#include <iostream>
#include <sstream>
#include <numeric>
#include <chrono>
#include <vector>
#include <opencv2/opencv.hpp>
#include <dirent.h>
#include "NvInfer.h"
#include "cuda_runtime_api.h"
#include "logging.h"
#include "include/utils.hpp"
#define CHECK(status) \
do\
{\
auto ret = (status);\
if (ret != 0)\
{\
std::cerr << "Cuda failure: " << ret << std::endl;\
abort();\
}\
} while (0)
#define DEVICE 0 // GPU id
const std::vector<std::string> plate_string={"#","京","沪","津","渝","冀","晋","蒙","辽","吉","黑","苏","浙","皖", \
"闽","赣","鲁","豫","鄂","湘","粤","桂","琼","川","贵","云","藏","陕","甘","青","宁","新","学","警","港","澳","挂","使","领","民","航","深", \
"0","1","2","3","4","5","6","7","8","9","A","B","C","D","E","F","G","H","J","K","L","M","N","P","Q","R","S","T","U","V","W","X","Y","Z"};
using namespace nvinfer1;
// stuff we know about the network and the input/output blobs
static Logger gLogger;
float* blobFromImage_plate(cv::Mat& img,float mean_value,float std_value){
float* blob = new float[img.total()*3];
int channels = 3;
int img_h = img.rows;
int img_w = img.cols;
int k = 0;
for (size_t c = 0; c <3; c++)
{
for (size_t h = 0; h < img_h; h++)
{
for (size_t w = 0; w < img_w; w++)
{
blob[k++] =
((float)img.at<cv::Vec3b>(h, w)[c]/255.0-mean_value)/std_value;
}
}
}
return blob;
}
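// Normalization note: each channel value maps to (p/255 - mean)/std with
// mean=0.588 and std=0.193 (presumably the statistics used at training time).
// For example p=255 -> (1.0-0.588)/0.193 ~ 2.135 and p=0 -> ~ -3.047; the layout
// is also converted from OpenCV's interleaved HWC (BGR) to planar CHW.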
void doInference(IExecutionContext& context, float* input, float* output, const int output_size, cv::Size input_shape,const char *INPUT_BLOB_NAME,const char *OUTPUT_BLOB_NAME) {
const ICudaEngine& engine = context.getEngine();
// Pointers to input and output device buffers to pass to engine.
// Engine requires exactly IEngine::getNbBindings() number of buffers.
assert(engine.getNbBindings() == 2);
void* buffers[2];
// In order to bind the buffers, we need to know the names of the input and output tensors.
// Note that indices are guaranteed to be less than IEngine::getNbBindings()
const int inputIndex = engine.getBindingIndex(INPUT_BLOB_NAME);
assert(engine.getBindingDataType(inputIndex) == nvinfer1::DataType::kFLOAT);
const int outputIndex = engine.getBindingIndex(OUTPUT_BLOB_NAME);
assert(engine.getBindingDataType(outputIndex) == nvinfer1::DataType::kFLOAT);
int mBatchSize = engine.getMaxBatchSize();
// Create GPU buffers on device
CHECK(cudaMalloc(&buffers[inputIndex], 3 * input_shape.height * input_shape.width * sizeof(float)));
CHECK(cudaMalloc(&buffers[outputIndex], output_size*sizeof(float)));
// Create stream
cudaStream_t stream;
CHECK(cudaStreamCreate(&stream));
// DMA input batch data to device, infer on the batch asynchronously, and DMA output back to host
CHECK(cudaMemcpyAsync(buffers[inputIndex], input, 3 * input_shape.height * input_shape.width * sizeof(float), cudaMemcpyHostToDevice, stream));
context.enqueue(1, buffers, stream, nullptr);
// context.enqueueV2( buffers, stream, nullptr);
CHECK(cudaMemcpyAsync(output, buffers[outputIndex], output_size * sizeof(float), cudaMemcpyDeviceToHost, stream));
cudaStreamSynchronize(stream);
// Release stream and buffers
cudaStreamDestroy(stream);
CHECK(cudaFree(buffers[inputIndex]));
CHECK(cudaFree(buffers[outputIndex]));
}
std::string decode_outputs(float *prob,int output_size)
{
std::string plate ="";
std::string pre_str ="#";
for (int i = 0; i<output_size; i++)
{
int index = int(prob[i]);
if (plate_string[index]!="#" && plate_string[index]!=pre_str)
plate+=plate_string[index];
pre_str = plate_string[index];
}
return plate;
}
int main(int argc, char** argv)
{
cudaSetDevice(DEVICE);
// deserialize the recognition engine and evaluate it over a directory of plate crops
char *trtModelStream{nullptr};
size_t size{0};
int plate_rec_input_w = 168;
int plate_rec_input_h = 48;
float mean_value=0.588;
float std_value =0.193;
const char* plate_rec_input_name = "images"; // ONNX input tensor name
const char* plate_rec_out_name= "output"; // ONNX output tensor name
if (argc == 4 && std::string(argv[2]) == "-i") {
const std::string engine_file_path {argv[1]};
std::ifstream file(engine_file_path, std::ios::binary);
if (file.good()) {
file.seekg(0, file.end);
size = file.tellg();
file.seekg(0, file.beg);
trtModelStream = new char[size];
assert(trtModelStream);
file.read(trtModelStream, size);
file.close();
}
} else {
std::cerr << "arguments not right!" << std::endl;
std::cerr << "run 'python3 yolox/deploy/trt.py -n yolox-{tiny, s, m, l, x}' to serialize model first!" << std::endl;
std::cerr << "Then use the following command:" << std::endl;
std::cerr << "./yolox ../model_trt.engine -i ../../../assets/dog.jpg // deserialize file and run inference" << std::endl;
return -1;
}
const std::string input_image_path {argv[3]};
//std::vector<std::string> file_names;
//if (read_files_in_dir(argv[2], file_names) < 0) {
//std::cout << "read_files_in_dir failed." << std::endl;
//return -1;
//}
IRuntime* runtime = createInferRuntime(gLogger);
assert(runtime != nullptr);
ICudaEngine* engine = runtime->deserializeCudaEngine(trtModelStream, size);
assert(engine != nullptr);
IExecutionContext* context = engine->createExecutionContext();
assert(context != nullptr);
delete[] trtModelStream;
auto out_dims = engine->getBindingDimensions(1);
auto output_size = 1;
int OUTPUT_CANDIDATES = out_dims.d[1];
for(int j=0;j<out_dims.nbDims;j++) {
output_size *= out_dims.d[j];
}
static float* prob = new float[output_size];
std::string imgPath = input_image_path; // image directory passed as argv[3] via -i
std::vector<std::string> imagList;
std::vector<std::string>fileType{"jpg","png"};
readFileList(const_cast<char *>(imgPath.c_str()),imagList,fileType);
double sumTime = 0;
int right_label = 0;
int file_num = imagList.size();
for (auto &input_image_path:imagList)
{
cv::Mat img = cv::imread(input_image_path);
if (img.empty()) continue; // skip files OpenCV cannot decode
int img_w = img.cols;
int img_h = img.rows;
int top=0;
int left= 0;
cv::resize(img,img,cv::Size(plate_rec_input_w,plate_rec_input_h));
cv::Mat pr_img =img;
// std::cout << "blob image" << std::endl;
float* blob;
blob = blobFromImage_plate(pr_img,mean_value,std_value);
doInference(*context, blob, prob, output_size, pr_img.size(),plate_rec_input_name,plate_rec_out_name);
auto plate_number = decode_outputs(prob,output_size);
int pos = input_image_path.find_last_of("/");
auto image_name = input_image_path.substr(pos+1);
int pos2= image_name.find_last_of("_");
auto gt=image_name.substr(0,pos2);
if(gt==plate_number)
right_label+=1;
std::cout<<input_image_path<<" "<<right_label<<" "<<plate_number<<std::endl;
delete [] blob; // array form matches new float[] in blobFromImage_plate
}
printf("sum is %d,right is %d ,accuracy is %.4f",file_num,right_label,1.0*right_label/file_num);
// destroy the engine
// std::cout<<"averageTime:"<<sumTime/imagList.size()<<std::endl;
context->destroy();
engine->destroy();
runtime->destroy();
return 0;
}
|
2301_81045437/yolov7_plate
|
tensorrt/plate_rec.cpp
|
C++
|
unknown
| 7,719
|
#include "preprocess.h"
#include <opencv2/opencv.hpp>
__global__ void warpaffine_kernel(
uint8_t* src, int src_line_size, int src_width,
int src_height, float* dst, int dst_width,
int dst_height, uint8_t const_value_st,
AffineMatrix d2s, int edge) {
int position = blockDim.x * blockIdx.x + threadIdx.x;
if (position >= edge) return;
float m_x1 = d2s.value[0];
float m_y1 = d2s.value[1];
float m_z1 = d2s.value[2];
float m_x2 = d2s.value[3];
float m_y2 = d2s.value[4];
float m_z2 = d2s.value[5];
int dx = position % dst_width;
int dy = position / dst_width;
float src_x = m_x1 * dx + m_y1 * dy + m_z1 + 0.5f;
float src_y = m_x2 * dx + m_y2 * dy + m_z2 + 0.5f;
float c0, c1, c2;
if (src_x <= -1 || src_x >= src_width || src_y <= -1 || src_y >= src_height) {
// out of range
c0 = const_value_st;
c1 = const_value_st;
c2 = const_value_st;
} else {
int y_low = floorf(src_y);
int x_low = floorf(src_x);
int y_high = y_low + 1;
int x_high = x_low + 1;
uint8_t const_value[] = {const_value_st, const_value_st, const_value_st};
float ly = src_y - y_low;
float lx = src_x - x_low;
float hy = 1 - ly;
float hx = 1 - lx;
float w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
uint8_t* v1 = const_value;
uint8_t* v2 = const_value;
uint8_t* v3 = const_value;
uint8_t* v4 = const_value;
if (y_low >= 0) {
if (x_low >= 0)
v1 = src + y_low * src_line_size + x_low * 3;
if (x_high < src_width)
v2 = src + y_low * src_line_size + x_high * 3;
}
if (y_high < src_height) {
if (x_low >= 0)
v3 = src + y_high * src_line_size + x_low * 3;
if (x_high < src_width)
v4 = src + y_high * src_line_size + x_high * 3;
}
c0 = w1 * v1[0] + w2 * v2[0] + w3 * v3[0] + w4 * v4[0];
c1 = w1 * v1[1] + w2 * v2[1] + w3 * v3[1] + w4 * v4[1];
c2 = w1 * v1[2] + w2 * v2[2] + w3 * v3[2] + w4 * v4[2];
}
//bgr to rgb
float t = c2;
c2 = c0;
c0 = t;
//normalization
c0 = c0 / 255.0f;
c1 = c1 / 255.0f;
c2 = c2 / 255.0f;
//rgbrgbrgb to rrrgggbbb
int area = dst_width * dst_height;
float* pdst_c0 = dst + dy * dst_width + dx;
float* pdst_c1 = pdst_c0 + area;
float* pdst_c2 = pdst_c1 + area;
*pdst_c0 = c0;
*pdst_c1 = c1;
*pdst_c2 = c2;
}
void preprocess_kernel_img(
uint8_t* src, int src_width, int src_height,
float* dst, int dst_width, int dst_height,
cudaStream_t stream) {
AffineMatrix s2d,d2s;
float scale = std::min(dst_height / (float)src_height, dst_width / (float)src_width);
s2d.value[0] = scale;
s2d.value[1] = 0;
s2d.value[2] = -scale * src_width * 0.5 + dst_width * 0.5;
s2d.value[3] = 0;
s2d.value[4] = scale;
s2d.value[5] = -scale * src_height * 0.5 + dst_height * 0.5;
cv::Mat m2x3_s2d(2, 3, CV_32F, s2d.value);
cv::Mat m2x3_d2s(2, 3, CV_32F, d2s.value);
cv::invertAffineTransform(m2x3_s2d, m2x3_d2s);
memcpy(d2s.value, m2x3_d2s.ptr<float>(0), sizeof(d2s.value));
int jobs = dst_height * dst_width;
int threads = 256;
int blocks = ceil(jobs / (float)threads);
warpaffine_kernel<<<blocks, threads, 0, stream>>>(
src, src_width*3, src_width,
src_height, dst, dst_width,
dst_height, 128, d2s, jobs);
}
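// Host-side sketch (hypothetical device pointers and sizes): the kernel fuses
// letterbox resize, BGR->RGB swap, /255 normalization and HWC->CHW packing.
// s2d maps the source into a centered, aspect-preserving dst canvas; its inverse
// d2s is what each output pixel uses to sample the source bilinearly, with 128
// as the padding value for out-of-range samples.
// preprocess_kernel_img(d_src, 1920, 1080, d_dst, 640, 640, stream);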
|
2301_81045437/yolov7_plate
|
tensorrt/preprocess.cu
|
Cuda
|
unknown
| 3,557
|
#ifndef __PREPROCESS_H
#define __PREPROCESS_H
#include <cuda_runtime.h>
#include <cstdint>
struct AffineMatrix{
float value[6];
};
void preprocess_kernel_img(uint8_t* src, int src_width, int src_height,
float* dst, int dst_width, int dst_height,
cudaStream_t stream);
#endif // __PREPROCESS_H
|
2301_81045437/yolov7_plate
|
tensorrt/preprocess.h
|
C++
|
unknown
| 357
|
#include "utils.hpp"
std::string getHouZhui(std::string fileName)
{
// std::string fileName="/home/xiaolei/23.jpg";
int pos=fileName.find_last_of(std::string("."));
std::string houZui=fileName.substr(pos+1);
return houZui;
}
int readFileList(char *basePath,std::vector<std::string> &fileList,std::vector<std::string> fileType)
{
DIR *dir;
struct dirent *ptr;
char base[1000];
if ((dir=opendir(basePath)) == NULL)
{
perror("Open dir error...");
exit(1);
}
while ((ptr=readdir(dir)) != NULL)
{
if(strcmp(ptr->d_name,".")==0 || strcmp(ptr->d_name,"..")==0) /// current dir or parent dir
continue;
else if(ptr->d_type == 8)
{ /// regular file (DT_REG)
if (fileType.size())
{
std::string houZui=getHouZhui(std::string(ptr->d_name));
for (auto &s:fileType)
{
if (houZui==s)
{
fileList.push_back(basePath+std::string("/")+std::string(ptr->d_name));
break;
}
}
}
else
{
fileList.push_back(basePath+std::string("/")+std::string(ptr->d_name));
}
}
else if(ptr->d_type == 10) /// symbolic link (DT_LNK)
printf("d_name:%s/%s\n",basePath,ptr->d_name);
else if(ptr->d_type == 4) /// directory (DT_DIR)
{
memset(base,'\0',sizeof(base));
strcpy(base,basePath);
strcat(base,"/");
strcat(base,ptr->d_name);
readFileList(base,fileList,fileType);
}
}
closedir(dir);
return 1;
}
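// Usage sketch: recursively collect every jpg/png under a directory tree,
// mirroring the call sites in detect_rec_plate.cpp and plate_rec.cpp.
// std::vector<std::string> files;
// std::vector<std::string> types{"jpg","png"};
// readFileList(const_cast<char *>("./imgs"), files, types);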
void draw_rect(const cv::Mat& image, const std::vector<boundingBox>bboxes,const char* class_names[])
{
// static const char* class_names[] = {
// "head", "leg", "hand", "back", "nostd", "body", "plate", "logo"};
// cv::Mat image = bgr.clone();
for (size_t i = 0; i < bboxes.size(); i++)
{
// const Object& obj = objects[i];
const boundingBox &obj= bboxes[i];
// fprintf(stderr, "%d = %.5f at %.2f %.2f %.2f x %.2f\n", obj.label, obj.prob,
// obj.rect.x, obj.rect.y, obj.rect.width, obj.rect.height);
cv::Scalar color = cv::Scalar(color_list1[obj.label][0], color_list1[obj.label][1], color_list1[obj.label][2]);
float c_mean = cv::mean(color)[0];
cv::Scalar txt_color;
if (c_mean > 0.5){
txt_color = cv::Scalar(0, 0, 0);
}else{
txt_color = cv::Scalar(255, 255, 255);
}
cv::Rect myRect(obj.x,obj.y,obj.w,obj.h);
cv::rectangle(image,myRect, color * 255, 2);
char text[256];
sprintf(text, "%s %.1f%%", class_names[obj.label], obj.score * 100);
int baseLine = 0;
cv::Size label_size = cv::getTextSize(text, cv::FONT_HERSHEY_SIMPLEX, 0.4, 1, &baseLine);
cv::Scalar txt_bk_color = color * 0.7 * 255;
int x = obj.x;
int y = obj.y + 1;
//int y = obj.rect.y - label_size.height - baseLine;
if (y > image.rows)
y = image.rows;
//if (x + label_size.width > image.cols)
//x = image.cols - label_size.width;
cv::rectangle(image, cv::Rect(cv::Point(x, y), cv::Size(label_size.width, label_size.height + baseLine)),
txt_bk_color,-1);
cv::putText(image, text, cv::Point(x, y + label_size.height),
cv::FONT_HERSHEY_SIMPLEX, 0.4, txt_color, 1);
}
}
bool cmpBox(boundingBox b1, boundingBox b2)
{
return b1.score > b2.score;
}
float getIou(boundingBox b1,boundingBox b2) // compute IoU
{
int xl1 = b1.x; // left
int xr1 = b1.w+b1.x; // right
int yt1 = b1.y; // top
int yb1 = b1.y+b1.h; // bottom
int xl2 = b2.x; // left
int xr2 = b2.w+b2.x; // right
int yt2 = b2.y; // top
int yb2 = b2.y+b2.h; // bottom
int x11 = std::max(xl1,xl2);
int y11 = std::max(yt1,yt2);
int x22 = std::min(xr1,xr2);
int y22 = std::min(yb1,yb2);
float intersectionArea = std::max(0,x22-x11)*std::max(0,y22-y11); // intersection, 0 when the boxes don't overlap
float unionArea = (xr1-xl1)*(yb1-yt1)+(xr2-xl2)*(yb2-yt2)-intersectionArea; // union
return 1.0f*intersectionArea/unionArea;
}
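// Worked example: b1=(x=0,y=0,w=10,h=10) and b2=(x=5,y=5,w=10,h=10) intersect in
// the 5x5 square [5,10]x[5,10], area 25; union = 100 + 100 - 25 = 175; so
// IoU = 25/175 ~ 0.143. Non-overlapping boxes now return 0 thanks to the clamp.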
void myNms(std::vector<boundingBox>&bboxes,float score)
{
std::sort(bboxes.begin(),bboxes.end(),cmpBox);
for(int i = 0; i<(int)bboxes.size()-1; i++) // cast avoids size_t underflow when bboxes is empty
{
for(int j = i+1;j<bboxes.size(); j++)
{
if(getIou(bboxes[i],bboxes[j])>score)
{
bboxes.erase(bboxes.begin()+j);
j--;
}
}
}
}
|
2301_81045437/yolov7_plate
|
tensorrt/utils.cpp
|
C++
|
unknown
| 4,636
|
import argparse
import json
import os
from pathlib import Path
from threading import Thread
import numpy as np
import torch
import yaml
from tqdm import tqdm
from models.experimental import attempt_load
from utils.datasets import create_dataloader
from utils.general import coco80_to_coco91_class, check_dataset, check_file, check_img_size, check_requirements, \
box_iou, non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, increment_path, colorstr
from utils.metrics import ap_per_class, ConfusionMatrix
from utils.plots import plot_images, output_to_target, plot_study_txt
from utils.torch_utils import select_device, time_synchronized
import cv2
def test(data,
weights=None,
batch_size=32,
imgsz=640,
conf_thres=0.001,
iou_thres=0.6, # for NMS
save_json=False,
save_json_kpt=False,
single_cls=False,
augment=False,
verbose=False,
model=None,
dataloader=None,
save_dir=Path(''), # for saving images
save_txt=False, # for auto-labelling
save_txt_tidl=False, # for auto-labelling
save_hybrid=False, # for hybrid auto-labelling
save_conf=False, # save auto-label confidences
plots=True,
wandb_logger=None,
compute_loss=None,
half_precision=True,
is_coco=False,
opt=None,
tidl_load=False,
dump_img=False,
kpt_label=False,
flip_test=False):
# Initialize/load model and set device
training = model is not None
if training: # called by train.py
device = next(model.parameters()).device # get model device
else: # called directly
set_logging()
device = select_device(opt.device, batch_size=batch_size)
# Directories
save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok) # increment run
(save_dir / 'labels' if save_txt or save_txt_tidl else save_dir).mkdir(parents=True, exist_ok=True) # make dir
# Load model
model = attempt_load(weights, map_location=device) # load FP32 model
gs = max(int(model.stride.max()), 32) # grid size (max stride)
imgsz = check_img_size(imgsz, s=gs) # check img_size
# Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99
# if device.type != 'cpu' and torch.cuda.device_count() > 1:
# model = nn.DataParallel(model)
# Half
half = device.type != 'cpu' and half_precision # half precision only supported on CUDA
if half:
model.half()
# Configure
model.eval()
model.model[-1].flip_test = False
model.model[-1].flip_index = [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15]
if isinstance(data, str):
is_coco = data.endswith('coco.yaml') or data.endswith('coco_kpts.yaml')
with open(data) as f:
data = yaml.safe_load(f)
check_dataset(data) # check
nc = 1 if single_cls else int(data['nc']) # number of classes
iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95
niou = iouv.numel()
# Logging
log_imgs = 0
if wandb_logger and wandb_logger.wandb:
log_imgs = min(wandb_logger.log_imgs, 100)
# Dataloader
if not training:
if device.type != 'cpu':
model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once
task = opt.task if opt.task in ('train', 'val', 'test') else 'val' # path to train/val/test images
dataloader = create_dataloader(data[task], imgsz, batch_size, gs, opt, pad=0.5, rect=True,
prefix=colorstr(f'{task}: '), tidl_load=tidl_load, kpt_label=kpt_label)[0]
seen = 0
confusion_matrix = ConfusionMatrix(nc=nc)
names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)}
coco91class = coco80_to_coco91_class()
s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')
p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0.
loss = torch.zeros(3, device=device)
jdict, stats, ap, ap_class, wandb_images = [], [], [], [], []
#jdict_kpt = [] if kpt_label else None
for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)):
img = img.to(device, non_blocking=True)
if dump_img:
dst_file = os.path.join(save_dir, 'dump_img', 'images', 'val2017', Path(paths[0]).stem + '.png')
os.makedirs(os.path.dirname(dst_file), exist_ok=True)
cv2.imwrite( dst_file, img[0].cpu().numpy().transpose(1,2,0)[:,:,::-1])  # .cpu() needed once img is on the GPU
#print(img.shape)
img = img.half() if half else img.float() # uint8 to fp16/32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
targets = targets.to(device)
nb, _, height, width = img.shape # batch size, channels, height, width
with torch.no_grad():
# Run model
t = time_synchronized()
out, train_out = model(img, augment=augment) # inference and training outputs
if flip_test:
img_flip = torch.flip(img,[3])
model.model[-1].flip_test = True
out_flip, train_out_flip = model(img_flip, augment=augment) # inference and training outputs
model.model[-1].flip_test = False
fuse1 = (out + out_flip)/2.0
out = torch.cat((out,fuse1), axis=1)
out = out[...,:6] if not kpt_label else out
targets = targets[..., :6] if not kpt_label else targets
t0 += time_synchronized() - t
# Compute loss
if compute_loss:
loss += compute_loss([x.float() for x in train_out], targets)[1][:3] # box, obj, cls
# Run NMS
if kpt_label:
num_points = (targets.shape[1]//2 - 1)
targets[:, 2:] *= torch.Tensor([width, height]*num_points).to(device) # to pixels
else:
targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels
lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling
t = time_synchronized()
out = non_max_suppression(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls, kpt_label=kpt_label, nc=model.yaml['nc'])
t1 += time_synchronized() - t
# Statistics per image
for si, pred in enumerate(out):
# if len(pred)>0 and torch.max(pred[:,4])>0.3:
# with open(save_dir / 'persons.txt' , 'a') as f:
# path = Path(paths[si])
# f.write('./images/test2017/'+path.stem + '.jpg' + '\n')
labels = targets[targets[:, 0] == si, 1:]
nl = len(labels)
tcls = labels[:, 0].tolist() if nl else [] # target class
path = Path(paths[si])
seen += 1
if len(pred) == 0:
if nl:
stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls))
continue
# Predictions
if single_cls:
pred[:, 5] = 0
predn = pred.clone()
scale_coords(img[si].shape[1:], predn[:,:4], shapes[si][0], shapes[si][1], kpt_label=False) # native-space pred
if kpt_label:
scale_coords(img[si].shape[1:], predn[:,6:], shapes[si][0], shapes[si][1], kpt_label=kpt_label, step=3) # native-space pred
# Append to text file
if save_txt:
gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]] # normalization gain whwh
for *xyxy, conf, cls in predn.tolist():
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f:
f.write(('%g ' * len(line)).rstrip() % line + '\n')
if save_txt_tidl: # Write to file in tidl dump format
for *xyxy, conf, cls in predn[:, :6].tolist():
xyxy = torch.tensor(xyxy).view(-1).tolist()
line = (conf, cls, *xyxy) if opt.save_conf else (cls, *xyxy) # label format
with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f:
f.write(('%g ' * len(line)).rstrip() % line + '\n')
# W&B logging - Media Panel Plots
if len(wandb_images) < log_imgs and wandb_logger.current_epoch > 0: # Check for test operation
if wandb_logger.current_epoch % wandb_logger.bbox_interval == 0:
box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
"class_id": int(cls),
"box_caption": "%s %.3f" % (names[cls], conf),
"scores": {"class_score": conf},
"domain": "pixel"} for *xyxy, conf, cls in pred.tolist()]
boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space
wandb_images.append(wandb_logger.wandb.Image(img[si], boxes=boxes, caption=path.name))
wandb_logger.log_training_progress(predn, path, names) if wandb_logger and wandb_logger.wandb_run else None
# Append to pycocotools JSON dictionary
if save_json or save_json_kpt:
# [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
image_id = int(path.stem) if path.stem.isnumeric() else path.stem
box = xyxy2xywh(predn[:, :4]) # xywh
box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner
for p, b in zip(predn.tolist(), box.tolist()):
det_dict = {'image_id': image_id,
'category_id': coco91class[int(p[5])] if is_coco else int(p[5]),
#'bbox': [round(x, 3) for x in b],
'score': round(p[4], 5)}
if kpt_label:
key_point = p[6:]
det_dict.update({'keypoints': key_point})
jdict.append(det_dict)
# Assign all predictions as incorrect
correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device)
if nl:
detected = [] # target indices
tcls_tensor = labels[:, 0]
# target boxes
tbox = xywh2xyxy(labels[:, 1:5])
scale_coords(img[si].shape[1:], tbox, shapes[si][0], shapes[si][1], kpt_label=False) # native-space labels, kpt_label is set to False as we are dealing with boxes here
if kpt_label:
tkpt = labels[:, 5:]
scale_coords(img[si].shape[1:], tkpt, shapes[si][0], shapes[si][1], kpt_label=kpt_label) # native-space labels
if plots:
confusion_matrix.process_batch(predn, torch.cat((labels[:, 0:1], tbox), 1))
# Per target class
for cls in torch.unique(tcls_tensor):
ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1) # prediction indices
pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1) # target indices
# Search for detections
if pi.shape[0]:
# Prediction to target ious
ious, i = box_iou(predn[pi, :4], tbox[ti]).max(1) # best ious, indices
# Append detections
detected_set = set()
for j in (ious > iouv[0]).nonzero(as_tuple=False):
d = ti[i[j]] # detected target
if d.item() not in detected_set:
detected_set.add(d.item())
detected.append(d)
correct[pi[j]] = ious[j] > iouv # iou_thres is 1xn
if len(detected) == nl: # all targets already located in image
break
# Append statistics (correct, conf, pcls, tcls)
stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))
# Plot images
if plots and batch_i < 3000:
f = save_dir / f'{path.stem}_labels.jpg' # labels
#Thread(target=plot_images, args=(img, targets, paths, f, names), daemon=True).start()
plot_images(img, targets, paths, f, names, kpt_label=kpt_label, orig_shape=shapes[si])
f = save_dir / f'{path.stem}_pred.jpg' # predictions
#Thread(target=plot_images, args=(img, output_to_target(out), paths, f, names), daemon=True).start()
plot_images(img, output_to_target(out), paths, f, names, kpt_label=kpt_label, steps=3, orig_shape=shapes[si])
# Compute statistics
stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy
if len(stats) and stats[0].any():
p, r, ap, f1, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names)
ap50, ap = ap[:, 0], ap.mean(1) # AP@0.5, AP@0.5:0.95
mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
nt = np.bincount(stats[3].astype(np.int64), minlength=nc) # number of targets per class
else:
nt = torch.zeros(1)
# Print results
pf = '%20s' + '%12i' * 2 + '%12.3g' * 4 # print format
print(pf % ('all', seen, nt.sum(), mp, mr, map50, map))
# Print results per class
if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
for i, c in enumerate(ap_class):
print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))
# Print speeds
t = tuple(x / seen * 1E3 for x in (t0, t1, t0 + t1)) + (imgsz, imgsz, batch_size) # tuple
if not training:
print('Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g' % t)
# Plots
if plots:
confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
if wandb_logger and wandb_logger.wandb:
val_batches = [wandb_logger.wandb.Image(str(f), caption=f.name) for f in sorted(save_dir.glob('test*.jpg'))]
wandb_logger.log({"Validation": val_batches})
if wandb_images:
wandb_logger.log({"Bounding Box Debugger/Images": wandb_images})
# Save JSON
    if (save_json or save_json_kpt) and len(jdict):  # parentheses matter: 'and' binds tighter than 'or'
w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights
pred_json = str(save_dir / f"{w}_predictions.json") # predictions json
with open(pred_json, 'w') as f:
json.dump(jdict, f)
if save_json:
anno_json = '../coco/annotations/instances_val2017.json' # annotations json
print('\nEvaluating pycocotools mAP... saving %s...' % pred_json)
try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
anno = COCO(anno_json) # init annotations api
pred = anno.loadRes(pred_json) # init predictions api
eval = COCOeval(anno, pred, 'bbox')
if is_coco:
eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files] # image IDs to evaluate
eval.evaluate()
eval.accumulate()
eval.summarize()
map, map50 = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5)
except Exception as e:
print(f'pycocotools unable to run: {e}')
elif save_json_kpt:
anno_json = '../coco/annotations/person_keypoints_val2017.json' # annotations json
print('\nEvaluating xtcocotools mAP... saving %s...' % pred_json)
try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
# from pycocotools.coco import COCO
# from pycocotools.cocoeval import COCOeval
from xtcocotools.coco import COCO
from xtcocotools.cocoeval import COCOeval
anno = COCO(anno_json) # init annotations api
pred = anno.loadRes(pred_json) # init predictions api
eval = COCOeval(anno, pred, 'keypoints', use_area=True) #,
if is_coco:
eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files] # image IDs to evaluate
eval.evaluate()
eval.accumulate()
eval.summarize()
map, map50 = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5)
except Exception as e:
print(f'xtcocotools unable to run: {e}')
# Return results
model.float() # for training
if not training:
s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
print(f"Results saved to {save_dir}{s}")
maps = np.zeros(nc) + map
for i, c in enumerate(ap_class):
maps[c] = ap[i]
return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='test.py')
parser.add_argument('--weights', nargs='+', type=str, default='weights/plate_detect.pt', help='model.pt path(s)')
parser.add_argument('--data', type=str, default='data/plate.yaml', help='*.data path')
parser.add_argument('--batch-size', type=int, default=32, help='size of each image batch')
parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
parser.add_argument('--conf-thres', type=float, default=0.001, help='object confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.6, help='IOU threshold for NMS')
parser.add_argument('--task', default='val', help='train, val, test, speed or study')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
parser.add_argument('--augment', action='store_true', help='augmented inference')
parser.add_argument('--verbose', action='store_true', help='report mAP by class')
parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
parser.add_argument('--save-txt-tidl', action='store_true', help='save results to *.txt in tidl format')
    parser.add_argument('--tidl-load', action='store_true', help='load the data from a list specified as in tidl')
    parser.add_argument('--dump-img', action='store_true', help='load the data from a list specified as in tidl')
parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt')
parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
parser.add_argument('--save-json', action='store_true', help='save a cocoapi-compatible JSON results file')
parser.add_argument('--save-json-kpt', action='store_true', help='save a cocoapi-compatible JSON results file for key-points')
parser.add_argument('--project', default='runs/test', help='save to project/name')
parser.add_argument('--name', default='exp', help='save to project/name')
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
parser.add_argument('--kpt-label', type=int, default=4, help='number of keypoints')
parser.add_argument('--flip-test', action='store_true', help='Whether to run flip_test or not')
opt = parser.parse_args()
opt.save_json |= opt.data.endswith('coco.yaml')
opt.save_json_kpt |= opt.data.endswith('coco_kpts.yaml')
opt.data = check_file(opt.data) # check file
print(opt)
check_requirements(exclude=('tensorboard', 'pycocotools', 'thop'))
if opt.task in ('train', 'val', 'test'): # run normally
test(opt.data,
opt.weights,
opt.batch_size,
opt.img_size,
opt.conf_thres,
opt.iou_thres,
opt.save_json,
opt.save_json_kpt,
opt.single_cls,
opt.augment,
opt.verbose,
save_txt=opt.save_txt | opt.save_hybrid,
save_txt_tidl=opt.save_txt_tidl,
save_hybrid=opt.save_hybrid,
save_conf=opt.save_conf,
opt=opt,
tidl_load = opt.tidl_load,
dump_img = opt.dump_img,
kpt_label = opt.kpt_label,
flip_test = opt.flip_test,
)
elif opt.task == 'speed': # speed benchmarks
for w in opt.weights:
test(opt.data, w, opt.batch_size, opt.img_size, 0.25, 0.45, save_json=False, plots=False, opt=opt)
elif opt.task == 'study': # run over a range of settings and save/plot
# python test.py --task study --data coco.yaml --iou 0.7 --weights yolov5s.pt yolov5m.pt yolov5l.pt yolov5x.pt
x = list(range(256, 1536 + 128, 128)) # x axis (image sizes)
for w in opt.weights:
f = f'study_{Path(opt.data).stem}_{Path(w).stem}.txt' # filename to save to
y = [] # y axis
for i in x: # img-size
print(f'\nRunning {f} point {i}...')
r, _, t = test(opt.data, w, opt.batch_size, i, opt.conf_thres, opt.iou_thres, opt.save_json,
plots=False, opt=opt)
y.append(r + t) # results and times
np.savetxt(f, y, fmt='%10.4g') # save
os.system('zip -r study.zip study_*.txt')
plot_study_txt(x=x) # plot
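# Usage sketches (paths and weights below are the argparse defaults above; adjust to your setup):
#   $ python test.py --data data/plate.yaml --weights weights/plate_detect.pt --img-size 640
#   $ python test.py --task speed --weights weights/plate_detect.pt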
|
2301_81045437/yolov7_plate
|
test.py
|
Python
|
unknown
| 22,425
|
import argparse
import logging
import math
import os
import random
import time
from copy import deepcopy
from pathlib import Path
from threading import Thread
import numpy as np
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torch.utils.data
import yaml
from torch.cuda import amp
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
import test # import test.py to get mAP after each epoch
from models.experimental import attempt_load
from models.yolo import Model
from utils.autoanchor import check_anchors
from utils.datasets import create_dataloader
from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \
fitness, strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \
check_requirements, print_mutation, set_logging, one_cycle, colorstr
from utils.google_utils import attempt_download
from utils.loss import ComputeLoss
from utils.plots import plot_images, plot_labels, plot_results, plot_evolution
from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, is_parallel
from utils.wandb_logging.wandb_utils import WandbLogger, check_wandb_resume
logger = logging.getLogger(__name__)
def train(hyp, opt, device, tb_writer=None):
logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items()))
save_dir, epochs, batch_size, total_batch_size, weights, rank, kpt_label = \
Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank, opt.kpt_label
# Directories
wdir = save_dir / 'weights'
wdir.mkdir(parents=True, exist_ok=True) # make dir
last = wdir / 'last.pt'
best = wdir / 'best.pt'
results_file = save_dir / 'results.txt'
# Save run settings
with open(save_dir / 'hyp.yaml', 'w') as f:
yaml.safe_dump(hyp, f, sort_keys=False)
with open(save_dir / 'opt.yaml', 'w') as f:
yaml.safe_dump(vars(opt), f, sort_keys=False)
# Configure
plots = not opt.evolve # create plots
cuda = device.type != 'cpu'
init_seeds(2 + rank)
with open(opt.data) as f:
data_dict = yaml.safe_load(f) # data dict
is_coco = opt.data.endswith('coco.yaml')
# Logging- Doing this before checking the dataset. Might update data_dict
loggers = {'wandb': None} # loggers dict
if rank in [-1, 0]:
opt.hyp = hyp # add hyperparameters
run_id = torch.load(weights).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None
wandb_logger = WandbLogger(opt, save_dir.stem, run_id, data_dict)
loggers['wandb'] = wandb_logger.wandb
data_dict = wandb_logger.data_dict
if wandb_logger.wandb:
weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp # WandbLogger might update weights, epochs if resuming
nc = 1 if opt.single_cls else int(data_dict['nc']) # number of classes
names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names
assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check
# Model
pretrained = weights.endswith('.pt')
if pretrained:
with torch_distributed_zero_first(rank):
attempt_download(weights) # download if not found locally
ckpt = torch.load(weights, map_location=device) # load checkpoint
model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create
exclude = ['anchor'] if (opt.cfg or hyp.get('anchors')) and not opt.resume else [] # exclude keys
state_dict = ckpt['model'].float().state_dict() # to FP32
state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude) # intersect
model.load_state_dict(state_dict, strict=False) # load
logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights)) # report
else:
model = Model(opt.cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create
with torch_distributed_zero_first(rank):
check_dataset(data_dict) # check
train_path = data_dict['train']
test_path = data_dict['val']
# Freeze
freeze = [] # parameter names to freeze (full or partial)
for k, v in model.named_parameters():
v.requires_grad = True # train all layers
if any(x in k for x in freeze):
print('freezing %s' % k)
v.requires_grad = False
# Optimizer
nbs = 64 # nominal batch size
accumulate = max(round(nbs / total_batch_size), 1) # accumulate loss before optimizing
hyp['weight_decay'] *= total_batch_size * accumulate / nbs # scale weight_decay
logger.info(f"Scaled weight_decay = {hyp['weight_decay']}")
pg0, pg1, pg2 = [], [], [] # optimizer parameter groups
for k, v in model.named_modules():
if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):
pg2.append(v.bias) # biases
if isinstance(v, nn.BatchNorm2d):
pg0.append(v.weight) # no decay
elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):
pg1.append(v.weight) # apply decay
if hasattr(v, 'im'):
if hasattr(v.im, 'implicit'):
pg0.append(v.im.implicit)
else:
for iv in v.im:
pg0.append(iv.implicit)
if hasattr(v, 'ia'):
if hasattr(v.ia, 'implicit'):
pg0.append(v.ia.implicit)
else:
for iv in v.ia:
pg0.append(iv.implicit)
if opt.adam:
optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum
else:
optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)
optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']}) # add pg1 with weight_decay
optimizer.add_param_group({'params': pg2}) # add pg2 (biases)
logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
del pg0, pg1, pg2
# Scheduler https://arxiv.org/pdf/1812.01187.pdf
# https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
if opt.linear_lr:
lf = lambda x: (1 - x / (epochs - 1)) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear
else:
lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf']
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
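    # Sketch of the cosine schedule (one_cycle lives in utils/general.py); roughly:
    #   lf(x) = ((1 - cos(x * pi / epochs)) / 2) * (hyp['lrf'] - 1) + 1
    # so the LR multiplier decays smoothly from 1.0 at epoch 0 to hyp['lrf'] at the final epoch.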
# plot_lr_scheduler(optimizer, scheduler, epochs)
# EMA
ema = ModelEMA(model) if rank in [-1, 0] else None
# Resume
start_epoch, best_fitness = 0, 0.0
if pretrained:
# Optimizer
if ckpt['optimizer'] is not None:
optimizer.load_state_dict(ckpt['optimizer'])
best_fitness = ckpt['best_fitness']
# EMA
if ema and ckpt.get('ema'):
ema.ema.load_state_dict(ckpt['ema'].float().state_dict())
ema.updates = ckpt['updates']
# Results
if ckpt.get('training_results') is not None:
results_file.write_text(ckpt['training_results']) # write results.txt
# Epochs
start_epoch = ckpt['epoch'] + 1
if opt.resume:
assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs)
if epochs < start_epoch:
logger.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' %
(weights, ckpt['epoch'], epochs))
epochs += ckpt['epoch'] # finetune additional epochs
del ckpt, state_dict
# Image sizes
gs = max(int(model.stride.max()), 32) # grid size (max stride)
nl = model.model[-1].nl # number of detection layers (used for scaling hyp['obj'])
imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size] # verify imgsz are gs-multiples
# DP mode
if cuda and rank == -1 and torch.cuda.device_count() > 1:
model = torch.nn.DataParallel(model)
# SyncBatchNorm
if opt.sync_bn and cuda and rank != -1:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
logger.info('Using SyncBatchNorm()')
# Trainloader
dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt,
hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank,
world_size=opt.world_size, workers=opt.workers,
image_weights=opt.image_weights, quad=opt.quad, prefix=colorstr('train: '), kpt_label=kpt_label)
mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class
nb = len(dataloader) # number of batches
assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, opt.data, nc - 1)
# Process 0
if rank in [-1, 0]:
testloader = create_dataloader(test_path, imgsz_test, batch_size * 2, gs, opt, # testloader
hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True, rank=-1,
world_size=opt.world_size, workers=opt.workers,
pad=0.5, prefix=colorstr('val: '), kpt_label=kpt_label)[0]
if not opt.resume:
labels = np.concatenate(dataset.labels, 0)
c = torch.tensor(labels[:, 0]) # classes
# cf = torch.bincount(c.long(), minlength=nc) + 1. # frequency
# model._initialize_biases(cf.to(device))
if plots:
plot_labels(labels, names, save_dir, loggers)
if tb_writer:
tb_writer.add_histogram('classes', c, 0)
# Anchors
if not opt.noautoanchor:
check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)
model.half().float() # pre-reduce anchor precision
# DDP mode
if cuda and rank != -1:
model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank,
# nn.MultiheadAttention incompatibility with DDP https://github.com/pytorch/pytorch/issues/26698
find_unused_parameters=any(isinstance(layer, nn.MultiheadAttention) for layer in model.modules()))
# Model parameters
hyp['box'] *= 3. / nl # scale to layers
hyp['cls'] *= nc / 80. * 3. / nl # scale to classes and layers
hyp['obj'] *= (imgsz / 640) ** 2 * 3. / nl # scale to image size and layers
hyp['label_smoothing'] = opt.label_smoothing
model.nc = nc # attach number of classes to model
model.hyp = hyp # attach hyperparameters to model
model.gr = 1.0 # iou loss ratio (obj_loss = 1.0 or iou)
model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights
model.names = names
# Start training
t0 = time.time()
nw = max(round(hyp['warmup_epochs'] * nb), 1000) # number of warmup iterations, max(3 epochs, 1k iterations)
# nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training
maps = np.zeros(nc) # mAP per class
results = (0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
scheduler.last_epoch = start_epoch - 1 # do not move
scaler = amp.GradScaler(enabled=cuda)
compute_loss = ComputeLoss(model, kpt_label=kpt_label) # init loss class
logger.info(f'Image sizes {imgsz} train, {imgsz_test} test\n'
f'Using {dataloader.num_workers} dataloader workers\n'
f'Logging results to {save_dir}\n'
f'Starting training for {epochs} epochs...')
for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------
model.train()
# Update image weights (optional)
if opt.image_weights:
# Generate indices
if rank in [-1, 0]:
cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights
iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights
dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx
# Broadcast if DDP
if rank != -1:
indices = (torch.tensor(dataset.indices) if rank == 0 else torch.zeros(dataset.n)).int()
dist.broadcast(indices, 0)
if rank != 0:
dataset.indices = indices.cpu().numpy()
# Update mosaic border
# b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
# dataset.mosaic_border = [b - imgsz, -b] # height, width borders
mloss = torch.zeros(6, device=device) # mean losses
if rank != -1:
dataloader.sampler.set_epoch(epoch)
pbar = enumerate(dataloader)
logger.info(('\n' + '%10s' * 10) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'kpt', 'kptv' ,'total', 'labels', 'img_size'))
if rank in [-1, 0]:
pbar = tqdm(pbar, total=nb) # progress bar
optimizer.zero_grad()
for i, (imgs, targets, paths, _) in pbar: # batch -------------------------------------------------------------
# if i>10:
# break
ni = i + nb * epoch # number integrated batches (since train start)
imgs = imgs.to(device, non_blocking=True).float() / 255.0 # uint8 to float32, 0-255 to 0.0-1.0
# Warmup
if ni <= nw:
xi = [0, nw] # x interp
# model.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou)
accumulate = max(1, np.interp(ni, xi, [1, nbs / total_batch_size]).round())
for j, x in enumerate(optimizer.param_groups):
# bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
if 'momentum' in x:
x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])
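            # Warmup sketch (illustrative numbers): with nbs=64 and a single-GPU total_batch_size
            # of 32, 'accumulate' ramps from 1 to 2 over the first nw iterations, so gradients are
            # accumulated up to the nominal batch size of 64 by the end of warmup.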
# Multi-scale
if opt.multi_scale:
                sz = random.randrange(int(imgsz * 0.5), int(imgsz * 1.5 + gs)) // gs * gs  # size (randrange needs int args)
sf = sz / max(imgs.shape[2:]) # scale factor
if sf != 1:
ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple)
imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)
# Forward
with amp.autocast(enabled=cuda):
pred = model(imgs) # forward
loss, loss_items = compute_loss(pred, targets.to(device)) # loss scaled by batch_size
if rank != -1:
loss *= opt.world_size # gradient averaged between devices in DDP mode
if opt.quad:
loss *= 4.
# Backward
scaler.scale(loss).backward()
# Optimize
if ni % accumulate == 0:
scaler.step(optimizer) # optimizer.step
scaler.update()
optimizer.zero_grad()
if ema:
ema.update(model)
# Print
if rank in [-1, 0]:
mloss = (mloss * i + loss_items) / (i + 1) # update mean losses
mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0) # (GB)
s = ('%10s' * 2 + '%10.4g' * 8) % (
'%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1])
pbar.set_description(s)
# Plot
if plots and ni < 33:
f = save_dir / f'train_batch{ni}.jpg' # filename
plot_images(imgs, targets, paths, f, kpt_label=kpt_label)
#Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start()
# if tb_writer:
# tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)
# tb_writer.add_graph(torch.jit.trace(model, imgs, strict=False), []) # add model graph
elif plots and ni == 10 and wandb_logger.wandb:
wandb_logger.log({"Mosaics": [wandb_logger.wandb.Image(str(x), caption=x.name) for x in
save_dir.glob('train*.jpg') if x.exists()]})
# end batch ------------------------------------------------------------------------------------------------
# end epoch ----------------------------------------------------------------------------------------------------
# Scheduler
lr = [x['lr'] for x in optimizer.param_groups] # for tensorboard
scheduler.step()
# DDP process 0 or single-GPU
if rank in [-1, 0]:
# mAP
ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights'])
final_epoch = epoch + 1 == epochs
if not opt.notest or final_epoch: # Calculate mAP
wandb_logger.current_epoch = epoch + 1
results, maps, times = test.test(data_dict,
batch_size=batch_size * 2,
imgsz=imgsz_test,
model=ema.ema,
single_cls=opt.single_cls,
dataloader=testloader,
save_dir=save_dir,
verbose=nc < 50 and final_epoch,
plots=plots and final_epoch,
wandb_logger=wandb_logger,
compute_loss=compute_loss,
is_coco=is_coco,
kpt_label=kpt_label)
# Write
with open(results_file, 'a') as f:
f.write(s + '%10.4g' * 7 % results + '\n') # append metrics, val_loss
if len(opt.name) and opt.bucket:
os.system('gsutil cp %s gs://%s/results/results%s.txt' % (results_file, opt.bucket, opt.name))
# Log
tags = ['train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss
'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95',
'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss
'x/lr0', 'x/lr1', 'x/lr2'] # params
for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags):
if tb_writer:
tb_writer.add_scalar(tag, x, epoch) # tensorboard
if wandb_logger.wandb:
wandb_logger.log({tag: x}) # W&B
# Update best mAP
fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
if fi > best_fitness:
best_fitness = fi
wandb_logger.end_epoch(best_result=best_fitness == fi)
# Save model
if (not opt.nosave) or (final_epoch and not opt.evolve): # if save
ckpt = {'epoch': epoch,
'best_fitness': best_fitness,
'training_results': results_file.read_text(),
'model': deepcopy(model.module if is_parallel(model) else model).half(),
'ema': deepcopy(ema.ema).half(),
'updates': ema.updates,
'optimizer': optimizer.state_dict(),
'wandb_id': wandb_logger.wandb_run.id if wandb_logger.wandb else None}
# Save last, best and delete
torch.save(ckpt, last)
if best_fitness == fi:
best_ckpt = {'epoch': epoch,
'best_fitness': best_fitness,
# 'training_results': results_file.read_text(),
'model': deepcopy(model.module if is_parallel(model) else model).half(),
'ema': deepcopy(ema.ema).half(),
'updates': ema.updates,
# 'optimizer': optimizer.state_dict(),
'wandb_id': wandb_logger.wandb_run.id if wandb_logger.wandb else None}
torch.save(best_ckpt, best)
if wandb_logger.wandb:
if ((epoch + 1) % opt.save_period == 0 and not final_epoch) and opt.save_period != -1:
wandb_logger.log_model(
last.parent, opt, epoch, fi, best_model=best_fitness == fi)
del ckpt
# end epoch ----------------------------------------------------------------------------------------------------
# end training
if rank in [-1, 0]:
# Plots
if plots:
plot_results(save_dir=save_dir) # save as results.png
if wandb_logger.wandb:
files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]]
wandb_logger.log({"Results": [wandb_logger.wandb.Image(str(save_dir / f), caption=f) for f in files
if (save_dir / f).exists()]})
# Test best.pt
logger.info('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))
if opt.data.endswith('coco.yaml') and nc == 80: # if COCO
        for m in (last, best) if best.exists() else [last]:  # speed, mAP tests ([last] keeps a lone Path iterable)
results, _, _ = test.test(opt.data,
batch_size=batch_size * 2,
imgsz=imgsz_test,
conf_thres=0.001,
iou_thres=0.7,
model=attempt_load(m, device).half(),
single_cls=opt.single_cls,
dataloader=testloader,
save_dir=save_dir,
save_json=True,
plots=False,
is_coco=is_coco,
kpt_label=kpt_label)
# Strip optimizers
final = best if best.exists() else last # final model
for f in last, best:
if f.exists():
strip_optimizer(f) # strip optimizers
if opt.bucket:
os.system(f'gsutil cp {final} gs://{opt.bucket}/weights') # upload
if wandb_logger.wandb and not opt.evolve: # Log the stripped model
wandb_logger.wandb.log_artifact(str(final), type='model',
name='run_' + wandb_logger.wandb_run.id + '_model',
aliases=['last', 'best', 'stripped'])
wandb_logger.finish_run()
else:
dist.destroy_process_group()
torch.cuda.empty_cache()
return results
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--weights', type=str, default='yolov7-tiny.pt', help='initial weights path')
parser.add_argument('--cfg', type=str, default='cfg/yolov7-tiny-face.yaml', help='model.yaml path')
parser.add_argument('--data', type=str, default='data/plate.yaml', help='data.yaml path')
parser.add_argument('--hyp', type=str, default='data/hyp.face.yaml', help='hyperparameters path')
parser.add_argument('--epochs', type=int, default=200)
parser.add_argument('--batch-size', type=int, default=32, help='total batch size for all GPUs')
parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='[train, test] image sizes')
parser.add_argument('--rect', action='store_true', help='rectangular training')
parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
parser.add_argument('--notest', action='store_true', help='only test final epoch')
parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check')
parser.add_argument('--evolve', action='store_true', help='evolve hyperparameters')
parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
parser.add_argument('--cache-images', action='store_true', help='cache images for faster training')
parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class')
parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer')
parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')
parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers')
parser.add_argument('--project', default='runs/train', help='save to project/name')
parser.add_argument('--entity', default=None, help='W&B entity')
parser.add_argument('--name', default='exp', help='save to project/name')
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
parser.add_argument('--quad', action='store_true', help='quad dataloader')
parser.add_argument('--linear-lr', action='store_true', help='linear LR')
parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon')
parser.add_argument('--upload_dataset', action='store_true', help='Upload dataset as W&B artifact table')
parser.add_argument('--bbox_interval', type=int, default=-1, help='Set bounding-box image logging interval for W&B')
parser.add_argument('--save_period', type=int, default=-1, help='Log model after every "save_period" epoch')
parser.add_argument('--artifact_alias', type=str, default="latest", help='version of dataset artifact to be used')
parser.add_argument('--kpt-label', type=int, default=4, help='number of keypoints')
opt = parser.parse_args()
# Set DDP variables
opt.world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1
opt.global_rank = int(os.environ['RANK']) if 'RANK' in os.environ else -1
set_logging(opt.global_rank)
if opt.global_rank in [-1, 0]:
check_git_status()
check_requirements(exclude=('pycocotools', 'thop'))
# Resume
wandb_run = check_wandb_resume(opt)
if opt.resume and not wandb_run: # resume an interrupted run
ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run() # specified or most recent path
assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist'
apriori = opt.global_rank, opt.local_rank
with open(Path(ckpt).parent.parent / 'opt.yaml') as f:
opt = argparse.Namespace(**yaml.safe_load(f)) # replace
opt.cfg, opt.weights, opt.resume, opt.batch_size, opt.global_rank, opt.local_rank = \
'', ckpt, True, opt.total_batch_size, *apriori # reinstate
logger.info('Resuming training from %s' % ckpt)
else:
# opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml')
opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp) # check files
assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size))) # extend to 2 sizes (train, test)
opt.name = 'evolve' if opt.evolve else opt.name
opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve))
# DDP mode
opt.total_batch_size = opt.batch_size
device = select_device(opt.device, batch_size=opt.batch_size)
if opt.local_rank != -1:
assert torch.cuda.device_count() > opt.local_rank
torch.cuda.set_device(opt.local_rank)
device = torch.device('cuda', opt.local_rank)
dist.init_process_group(backend='nccl', init_method='env://') # distributed backend
assert opt.batch_size % opt.world_size == 0, '--batch-size must be multiple of CUDA device count'
opt.batch_size = opt.total_batch_size // opt.world_size
# Hyperparameters
with open(opt.hyp) as f:
hyp = yaml.safe_load(f) # load hyps
# Train
logger.info(opt)
if not opt.evolve:
tb_writer = None # init loggers
if opt.global_rank in [-1, 0]:
prefix = colorstr('tensorboard: ')
logger.info(f"{prefix}Start with 'tensorboard --logdir {opt.project}', view at http://localhost:6006/")
tb_writer = SummaryWriter(opt.save_dir) # Tensorboard
train(hyp, opt, device, tb_writer)
# Evolve hyperparameters (optional)
else:
# Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)
meta = {'lr0': (1, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3)
'lrf': (1, 0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf)
'momentum': (0.3, 0.6, 0.98), # SGD momentum/Adam beta1
'weight_decay': (1, 0.0, 0.001), # optimizer weight decay
'warmup_epochs': (1, 0.0, 5.0), # warmup epochs (fractions ok)
'warmup_momentum': (1, 0.0, 0.95), # warmup initial momentum
'warmup_bias_lr': (1, 0.0, 0.2), # warmup initial bias lr
'box': (1, 0.02, 0.2), # box loss gain
'cls': (1, 0.2, 4.0), # cls loss gain
'cls_pw': (1, 0.5, 2.0), # cls BCELoss positive_weight
'obj': (1, 0.2, 4.0), # obj loss gain (scale with pixels)
'obj_pw': (1, 0.5, 2.0), # obj BCELoss positive_weight
'iou_t': (0, 0.1, 0.7), # IoU training threshold
'anchor_t': (1, 2.0, 8.0), # anchor-multiple threshold
'anchors': (2, 2.0, 10.0), # anchors per output grid (0 to ignore)
'fl_gamma': (0, 0.0, 2.0), # focal loss gamma (efficientDet default gamma=1.5)
'hsv_h': (1, 0.0, 0.1), # image HSV-Hue augmentation (fraction)
'hsv_s': (1, 0.0, 0.9), # image HSV-Saturation augmentation (fraction)
'hsv_v': (1, 0.0, 0.9), # image HSV-Value augmentation (fraction)
'degrees': (1, 0.0, 45.0), # image rotation (+/- deg)
'translate': (1, 0.0, 0.9), # image translation (+/- fraction)
'scale': (1, 0.0, 0.9), # image scale (+/- gain)
'shear': (1, 0.0, 10.0), # image shear (+/- deg)
'perspective': (0, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001
'flipud': (1, 0.0, 1.0), # image flip up-down (probability)
'fliplr': (0, 0.0, 1.0), # image flip left-right (probability)
                'mosaic': (1, 0.0, 1.0),  # image mosaic (probability)
'mixup': (1, 0.0, 1.0)} # image mixup (probability)
assert opt.local_rank == -1, 'DDP mode not implemented for --evolve'
opt.notest, opt.nosave = True, True # only test/save final epoch
# ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices
yaml_file = Path(opt.save_dir) / 'hyp_evolved.yaml' # save best result here
if opt.bucket:
os.system('gsutil cp gs://%s/evolve.txt .' % opt.bucket) # download evolve.txt if exists
for _ in range(300): # generations to evolve
if Path('evolve.txt').exists(): # if evolve.txt exists: select best hyps and mutate
# Select parent(s)
parent = 'single' # parent selection method: 'single' or 'weighted'
x = np.loadtxt('evolve.txt', ndmin=2)
n = min(5, len(x)) # number of previous results to consider
x = x[np.argsort(-fitness(x))][:n] # top n mutations
w = fitness(x) - fitness(x).min() # weights
if parent == 'single' or len(x) == 1:
# x = x[random.randint(0, n - 1)] # random selection
x = x[random.choices(range(n), weights=w)[0]] # weighted selection
elif parent == 'weighted':
x = (x * w.reshape(n, 1)).sum(0) / w.sum() # weighted combination
# Mutate
mp, s = 0.8, 0.2 # mutation probability, sigma
npr = np.random
npr.seed(int(time.time()))
g = np.array([x[0] for x in meta.values()]) # gains 0-1
ng = len(meta)
v = np.ones(ng)
while all(v == 1): # mutate until a change occurs (prevent duplicates)
v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)
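            # e.g. with mp=0.8, s=0.2 each gene is left at 1.0 (unchanged) with ~20% probability,
            # and otherwise scaled by a gain-weighted Gaussian factor clipped to [0.3, 3.0]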
for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300)
hyp[k] = float(x[i + 7] * v[i]) # mutate
# Constrain to limits
for k, v in meta.items():
hyp[k] = max(hyp[k], v[1]) # lower limit
hyp[k] = min(hyp[k], v[2]) # upper limit
hyp[k] = round(hyp[k], 5) # significant digits
# Train mutation
results = train(hyp.copy(), opt, device)
# Write mutation results
print_mutation(hyp.copy(), results, yaml_file, opt.bucket)
# Plot results
plot_evolution(yaml_file)
print(f'Hyperparameter evolution complete. Best results saved as: {yaml_file}\n'
f'Command to train a new model with these hyperparameters: $ python train.py --hyp {yaml_file}')
|
2301_81045437/yolov7_plate
|
train.py
|
Python
|
unknown
| 35,208
|
python train.py --batch-size 32 --data data/plate.yaml --img 640 640 --cfg cfg/yolov7-lite-e-plate.yaml --weights weights/yolov7-lite-e.pt --name yolov7 --hyp data/hyp.face.yaml
|
2301_81045437/yolov7_plate
|
train.sh
|
Shell
|
unknown
| 178
|
# init
|
2301_81045437/yolov7_plate
|
utils/__init__.py
|
Python
|
unknown
| 6
|
# Activation functions
import torch
import torch.nn as nn
import torch.nn.functional as F
# SiLU https://arxiv.org/pdf/1606.08415.pdf ----------------------------------------------------------------------------
class SiLU(nn.Module): # export-friendly version of nn.SiLU()
@staticmethod
def forward(x):
return x * torch.sigmoid(x)
class Hardswish(nn.Module): # export-friendly version of nn.Hardswish()
@staticmethod
def forward(x):
# return x * F.hardsigmoid(x) # for torchscript and CoreML
return x * F.hardtanh(x + 3, 0., 6.) / 6. # for torchscript, CoreML and ONNX
# Mish https://github.com/digantamisra98/Mish --------------------------------------------------------------------------
class Mish(nn.Module):
@staticmethod
def forward(x):
return x * F.softplus(x).tanh()
class MemoryEfficientMish(nn.Module):
class F(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return x.mul(torch.tanh(F.softplus(x))) # x * tanh(ln(1 + exp(x)))
@staticmethod
def backward(ctx, grad_output):
x = ctx.saved_tensors[0]
sx = torch.sigmoid(x)
fx = F.softplus(x).tanh()
return grad_output * (fx + x * sx * (1 - fx * fx))
def forward(self, x):
return self.F.apply(x)
# FReLU https://arxiv.org/abs/2007.11824 -------------------------------------------------------------------------------
class FReLU(nn.Module):
def __init__(self, c1, k=3): # ch_in, kernel
super().__init__()
self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False)
self.bn = nn.BatchNorm2d(c1)
def forward(self, x):
return torch.max(x, self.bn(self.conv(x)))
# ACON https://arxiv.org/pdf/2009.04759.pdf ----------------------------------------------------------------------------
class AconC(nn.Module):
r""" ACON activation (activate or not).
AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter
according to "Activate or Not: Learning Customized Activation" <https://arxiv.org/pdf/2009.04759.pdf>.
"""
def __init__(self, c1):
super().__init__()
self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
self.beta = nn.Parameter(torch.ones(1, c1, 1, 1))
def forward(self, x):
dpx = (self.p1 - self.p2) * x
return dpx * torch.sigmoid(self.beta * dpx) + self.p2 * x
class MetaAconC(nn.Module):
r""" ACON activation (activate or not).
MetaAconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is generated by a small network
according to "Activate or Not: Learning Customized Activation" <https://arxiv.org/pdf/2009.04759.pdf>.
"""
def __init__(self, c1, k=1, s=1, r=16): # ch_in, kernel, stride, r
super().__init__()
c2 = max(r, c1 // r)
self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
self.fc1 = nn.Conv2d(c1, c2, k, s, bias=True)
self.fc2 = nn.Conv2d(c2, c1, k, s, bias=True)
# self.bn1 = nn.BatchNorm2d(c2)
# self.bn2 = nn.BatchNorm2d(c1)
def forward(self, x):
y = x.mean(dim=2, keepdims=True).mean(dim=3, keepdims=True)
# batch-size 1 bug/instabilities https://github.com/ultralytics/yolov5/issues/2891
# beta = torch.sigmoid(self.bn2(self.fc2(self.bn1(self.fc1(y))))) # bug/unstable
beta = torch.sigmoid(self.fc2(self.fc1(y))) # bug patch BN layers removed
dpx = (self.p1 - self.p2) * x
return dpx * torch.sigmoid(beta * dpx) + self.p2 * x
|
2301_81045437/yolov7_plate
|
utils/activations.py
|
Python
|
unknown
| 3,722
|
# Auto-anchor utils
import numpy as np
import torch
import yaml
from tqdm import tqdm
from utils.general import colorstr
def check_anchor_order(m):
# Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary
a = m.anchor_grid.prod(-1).view(-1) # anchor area
da = a[-1] - a[0] # delta a
ds = m.stride[-1] - m.stride[0] # delta s
    if da.sign() != ds.sign():  # anchor order disagrees with stride order
print('Reversing anchor order')
m.anchors[:] = m.anchors.flip(0)
m.anchor_grid[:] = m.anchor_grid.flip(0)
def check_anchors(dataset, model, thr=4.0, imgsz=640):
# Check anchor fit to data, recompute if necessary
prefix = colorstr('autoanchor: ')
print(f'\n{prefix}Analyzing anchors... ', end='')
m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect()
shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True)
scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1)) # augment scale
wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float() # wh
def metric(k): # compute metric
r = wh[:, None] / k[None]
x = torch.min(r, 1. / r).min(2)[0] # ratio metric
best = x.max(1)[0] # best_x
aat = (x > 1. / thr).float().sum(1).mean() # anchors above threshold
bpr = (best > 1. / thr).float().mean() # best possible recall
return bpr, aat
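    # Ratio-metric sketch: for each label wh and anchor k, r = wh / k per dimension and
    # min(r, 1/r) is 1.0 for a perfect match, shrinking as shapes diverge; bpr is then the
    # fraction of labels whose best anchor ratio exceeds 1/thr (0.25 for the default thr=4).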
anchors = m.anchor_grid.clone().cpu().view(-1, 2) # current anchors
bpr, aat = metric(anchors)
print(f'anchors/target = {aat:.2f}, Best Possible Recall (BPR) = {bpr:.4f}', end='')
if bpr < 0.98: # threshold to recompute
print('. Attempting to improve anchors, please wait...')
na = m.anchor_grid.numel() // 2 # number of anchors
try:
anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False)
except Exception as e:
print(f'{prefix}ERROR: {e}')
new_bpr = metric(anchors)[0]
if new_bpr > bpr: # replace anchors
anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors)
m.anchor_grid[:] = anchors.clone().view_as(m.anchor_grid) # for inference
m.anchors[:] = anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1) # loss
check_anchor_order(m)
print(f'{prefix}New anchors saved to model. Update model *.yaml to use these anchors in the future.')
else:
print(f'{prefix}Original anchors better than new anchors. Proceeding with original anchors.')
print('') # newline
def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True):
""" Creates kmeans-evolved anchors from training dataset
Arguments:
path: path to dataset *.yaml, or a loaded dataset
n: number of anchors
img_size: image size used for training
thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0
gen: generations to evolve anchors using genetic algorithm
verbose: print all results
Return:
k: kmeans evolved anchors
Usage:
from utils.autoanchor import *; _ = kmean_anchors()
"""
from scipy.cluster.vq import kmeans
thr = 1. / thr
prefix = colorstr('autoanchor: ')
def metric(k, wh): # compute metrics
r = wh[:, None] / k[None]
x = torch.min(r, 1. / r).min(2)[0] # ratio metric
# x = wh_iou(wh, torch.tensor(k)) # iou metric
return x, x.max(1)[0] # x, best_x
def anchor_fitness(k): # mutation fitness
_, best = metric(torch.tensor(k, dtype=torch.float32), wh)
return (best * (best > thr).float()).mean() # fitness
def print_results(k):
k = k[np.argsort(k.prod(1))] # sort small to large
x, best = metric(k, wh0)
bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n # best possible recall, anch > thr
print(f'{prefix}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr')
print(f'{prefix}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, '
f'past_thr={x[x > thr].mean():.3f}-mean: ', end='')
for i, x in enumerate(k):
print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n') # use in *.cfg
return k
if isinstance(path, str): # *.yaml file
with open(path) as f:
data_dict = yaml.safe_load(f) # model dict
from utils.datasets import LoadImagesAndLabels
dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True)
else:
dataset = path # dataset
# Get label wh
shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True)
wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)]) # wh
# Filter
i = (wh0 < 3.0).any(1).sum()
if i:
print(f'{prefix}WARNING: Extremely small objects found. {i} of {len(wh0)} labels are < 3 pixels in size.')
    wh = wh0[(wh0 >= 2.0).any(1)]  # keep labels with at least one side >= 2 pixels
# wh = wh * (np.random.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1
# Kmeans calculation
print(f'{prefix}Running kmeans for {n} anchors on {len(wh)} points...')
s = wh.std(0) # sigmas for whitening
k, dist = kmeans(wh / s, n, iter=30) # points, mean distance
    assert len(k) == n, f'{prefix}ERROR: scipy.cluster.vq.kmeans requested {n} points but returned only {len(k)}'
k *= s
wh = torch.tensor(wh, dtype=torch.float32) # filtered
wh0 = torch.tensor(wh0, dtype=torch.float32) # unfiltered
k = print_results(k)
# Plot
# k, d = [None] * 20, [None] * 20
# for i in tqdm(range(1, 21)):
# k[i-1], d[i-1] = kmeans(wh / s, i) # points, mean distance
# fig, ax = plt.subplots(1, 2, figsize=(14, 7), tight_layout=True)
# ax = ax.ravel()
# ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.')
# fig, ax = plt.subplots(1, 2, figsize=(14, 7)) # plot wh
# ax[0].hist(wh[wh[:, 0]<100, 0],400)
# ax[1].hist(wh[wh[:, 1]<100, 1],400)
# fig.savefig('wh.png', dpi=200)
# Evolve
npr = np.random
    f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1  # fitness, anchor shape, mutation probability, sigma
pbar = tqdm(range(gen), desc=f'{prefix}Evolving anchors with Genetic Algorithm:') # progress bar
for _ in pbar:
v = np.ones(sh)
while (v == 1).all(): # mutate until a change occurs (prevent duplicates)
v = ((npr.random(sh) < mp) * npr.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0)
kg = (k.copy() * v).clip(min=2.0)
fg = anchor_fitness(kg)
if fg > f:
f, k = fg, kg.copy()
pbar.desc = f'{prefix}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}'
if verbose:
print_results(k)
return print_results(k)
|
2301_81045437/yolov7_plate
|
utils/autoanchor.py
|
Python
|
unknown
| 7,138
|
# init
|
2301_81045437/yolov7_plate
|
utils/aws/__init__.py
|
Python
|
unknown
| 6
|
# AWS EC2 instance startup 'MIME' script https://aws.amazon.com/premiumsupport/knowledge-center/execute-user-data-ec2/
# This script will run on every instance restart, not only on first start
# --- DO NOT COPY ABOVE COMMENTS WHEN PASTING INTO USERDATA ---
Content-Type: multipart/mixed; boundary="//"
MIME-Version: 1.0
--//
Content-Type: text/cloud-config; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Content-Disposition: attachment; filename="cloud-config.txt"
#cloud-config
cloud_final_modules:
- [scripts-user, always]
--//
Content-Type: text/x-shellscript; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Content-Disposition: attachment; filename="userdata.txt"
#!/bin/bash
# --- paste contents of userdata.sh here ---
--//
|
2301_81045437/yolov7_plate
|
utils/aws/mime.sh
|
Shell
|
unknown
| 780
|
# Resume all interrupted trainings in yolov5/ dir including DDP trainings
# Usage: $ python utils/aws/resume.py
import os
import sys
from pathlib import Path
import torch
import yaml
sys.path.append('./') # to run '$ python *.py' files in subdirectories
port = 0 # --master_port
path = Path('').resolve()
for last in path.rglob('*/**/last.pt'):
ckpt = torch.load(last)
if ckpt['optimizer'] is None:
continue
# Load opt.yaml
with open(last.parent.parent / 'opt.yaml') as f:
opt = yaml.safe_load(f)
# Get device count
d = opt['device'].split(',') # devices
nd = len(d) # number of devices
ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1) # distributed data parallel
if ddp: # multi-GPU
port += 1
cmd = f'python -m torch.distributed.launch --nproc_per_node {nd} --master_port {port} train.py --resume {last}'
else: # single-GPU
cmd = f'python train.py --resume {last}'
cmd += ' > /dev/null 2>&1 &' # redirect output to dev/null and run in daemon thread
print(cmd)
os.system(cmd)
|
2301_81045437/yolov7_plate
|
utils/aws/resume.py
|
Python
|
unknown
| 1,095
|
#!/bin/bash
# AWS EC2 instance startup script https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html
# This script will run only once on first instance start (for a re-start script see mime.sh)
# /home/ubuntu (ubuntu) or /home/ec2-user (amazon-linux) is working dir
# Use >300 GB SSD
cd /home/ubuntu
if [ ! -d yolov5 ]; then
echo "Running first-time script." # install dependencies, download COCO, pull Docker
git clone https://github.com/ultralytics/yolov5 && sudo chmod -R 777 yolov5
cd yolov5
bash data/scripts/get_coco.sh && echo "Data done." &
sudo docker pull ultralytics/yolov5:latest && echo "Docker done." &
python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." &
wait && echo "All tasks done." # finish background tasks
else
echo "Running re-start script." # resume interrupted runs
i=0
list=$(sudo docker ps -qa) # container list i.e. $'one\ntwo\nthree\nfour'
while IFS= read -r id; do
((i++))
echo "restarting container $i: $id"
sudo docker start $id
# sudo docker exec -it $id python train.py --resume # single-GPU
sudo docker exec -d $id python utils/aws/resume.py # multi-scenario
done <<<"$list"
fi
|
2301_81045437/yolov7_plate
|
utils/aws/userdata.sh
|
Shell
|
unknown
| 1,237
|
import cv2
import numpy as np
from PIL import Image, ImageDraw, ImageFont
def cv2ImgAddText(img, text, left, top, textColor=(0, 255, 0), textSize=20):
    if isinstance(img, np.ndarray):  # if it's an OpenCV (numpy) image, convert to PIL
img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
draw = ImageDraw.Draw(img)
fontText = ImageFont.truetype(
"fonts/platech.ttf", textSize, encoding="utf-8")
draw.text((left, top), text, textColor, font=fontText)
return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)
if __name__ == '__main__':
imgPath = "result.jpg"
img = cv2.imread(imgPath)
saveImg = cv2ImgAddText(img, '中国加油!', 50, 100, (255, 0, 0), 50)
# cv2.imshow('display',saveImg)
cv2.imwrite('save.jpg',saveImg)
# cv2.waitKey()
|
2301_81045437/yolov7_plate
|
utils/cv_puttext.py
|
Python
|
unknown
| 797
|
# Dataset utils and dataloaders
import glob
import logging
import math
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from utils.general import check_requirements, xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, segment2box, segments2boxes, \
resample_segments, clean_str
from utils.torch_utils import torch_distributed_zero_first
# Parameters
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes
vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes
logger = logging.getLogger(__name__)
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
def get_hash(files):
# Returns a single hash value of a list of files
return sum(os.path.getsize(f) for f in files if os.path.isfile(f))
def exif_size(img):
# Returns exif-corrected PIL size
s = img.size # (width, height)
try:
rotation = dict(img._getexif().items())[orientation]
if rotation == 6: # rotation 270
s = (s[1], s[0])
elif rotation == 8: # rotation 90
s = (s[1], s[0])
    except Exception:
        pass  # image has no EXIF data
return s
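# exif_size example (illustrative): a 4000x3000 JPEG stored with EXIF orientation 6 or 8
# (rotated 270/90 degrees) is reported as (3000, 4000).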
def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,
rank=-1, world_size=1, workers=8, image_weights=False, quad=False, prefix='', tidl_load=False, kpt_label=False):
# Make sure only the first process in DDP process the dataset first, and the following others can use the cache
with torch_distributed_zero_first(rank):
dataset = LoadImagesAndLabels(path, imgsz, batch_size,
augment=augment, # augment images
hyp=hyp, # augmentation hyperparameters
rect=rect, # rectangular training
cache_images=cache,
single_cls=opt.single_cls,
stride=int(stride),
pad=pad,
image_weights=image_weights,
prefix=prefix,
tidl_load=tidl_load,
kpt_label=kpt_label)
batch_size = min(batch_size, len(dataset))
nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers]) # number of workers
sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None
loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader
# Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader()
dataloader = loader(dataset,
batch_size=batch_size,
num_workers=nw,
sampler=sampler,
pin_memory=True,
collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn)
return dataloader, dataset
class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):
""" Dataloader that reuses workers
Uses same syntax as vanilla DataLoader
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
self.iterator = super().__iter__()
def __len__(self):
return len(self.batch_sampler.sampler)
def __iter__(self):
for i in range(len(self)):
yield next(self.iterator)
class _RepeatSampler(object):
""" Sampler that repeats forever
Args:
sampler (Sampler)
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
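# Minimal usage sketch for InfiniteDataLoader (arguments are assumptions; create_dataloader
# above shows the real call site). Its workers persist across epochs instead of respawning:
#   loader = InfiniteDataLoader(dataset, batch_size=16, num_workers=4,
#                               collate_fn=LoadImagesAndLabels.collate_fn)
#   for imgs, targets, paths, shapes in loader:
#       ...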
class LoadImages: # for inference
def __init__(self, path, img_size=640, stride=32):
p = str(Path(path).absolute()) # os-agnostic absolute path
if '*' in p:
files = sorted(glob.glob(p, recursive=True)) # glob
elif os.path.isdir(p):
files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir
elif os.path.isfile(p):
files = [p] # files
else:
raise Exception(f'ERROR: {p} does not exist')
images = [x for x in files if x.split('.')[-1].lower() in img_formats]
videos = [x for x in files if x.split('.')[-1].lower() in vid_formats]
ni, nv = len(images), len(videos)
self.img_size = img_size
self.stride = stride
self.files = images + videos
self.nf = ni + nv # number of files
self.video_flag = [False] * ni + [True] * nv
self.mode = 'image'
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nf > 0, f'No images or videos found in {p}. ' \
f'Supported formats are:\nimages: {img_formats}\nvideos: {vid_formats}'
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nf:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nf: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
self.frame += 1
print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.nframes}) {path}: ', end='')
else:
# Read image
self.count += 1
img0 = cv2.imread(path) # BGR
assert img0 is not None, 'Image Not Found ' + path
print(f'image {self.count}/{self.nf} {path}: ', end='')
# Padded resize
img = letterbox(img0, self.img_size, stride=self.stride, auto=False)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return path, img, img0, self.cap
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nf # number of files
class LoadWebcam: # for inference
def __init__(self, pipe='0', img_size=640, stride=32):
self.img_size = img_size
self.stride = stride
if pipe.isnumeric():
            pipe = int(pipe)  # local camera index (int() instead of eval() on user input)
# pipe = 'rtsp://192.168.1.64/1' # IP camera
# pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login
# pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera
self.pipe = pipe
self.cap = cv2.VideoCapture(pipe) # video capture object
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if cv2.waitKey(1) == ord('q'): # q to quit
self.cap.release()
cv2.destroyAllWindows()
raise StopIteration
# Read frame
if self.pipe == 0: # local camera
ret_val, img0 = self.cap.read()
img0 = cv2.flip(img0, 1) # flip left-right
else: # IP camera
n = 0
while True:
n += 1
self.cap.grab()
if n % 30 == 0: # skip frames
ret_val, img0 = self.cap.retrieve()
if ret_val:
break
# Print
assert ret_val, f'Camera Error {self.pipe}'
img_path = 'webcam.jpg'
print(f'webcam {self.count}: ', end='')
# Padded resize
img = letterbox(img0, self.img_size, stride=self.stride)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return img_path, img, img0, None
def __len__(self):
return 0
class LoadStreams: # multiple IP or RTSP cameras
def __init__(self, sources='streams.txt', img_size=640, stride=32):
self.mode = 'stream'
self.img_size = img_size
self.stride = stride
if os.path.isfile(sources):
with open(sources, 'r') as f:
sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]
else:
sources = [sources]
n = len(sources)
self.imgs = [None] * n
self.sources = [clean_str(x) for x in sources] # clean source names for later
for i, s in enumerate(sources): # index, source
# Start thread to read frames from video stream
print(f'{i + 1}/{n}: {s}... ', end='')
if 'youtube.com/' in s or 'youtu.be/' in s: # if source is YouTube video
check_requirements(('pafy', 'youtube_dl'))
import pafy
s = pafy.new(s).getbest(preftype="mp4").url # YouTube URL
s = int(s) if s.isnumeric() else s # i.e. s = '0' local webcam
cap = cv2.VideoCapture(s)
assert cap.isOpened(), f'Failed to open {s}'
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
self.fps = cap.get(cv2.CAP_PROP_FPS) % 100
_, self.imgs[i] = cap.read() # guarantee first frame
thread = Thread(target=self.update, args=([i, cap]), daemon=True)
print(f' success ({w}x{h} at {self.fps:.2f} FPS).')
thread.start()
print('') # newline
# check for common shapes
s = np.stack([letterbox(x, self.img_size, stride=self.stride)[0].shape for x in self.imgs], 0) # shapes
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
if not self.rect:
print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
def update(self, index, cap):
# Read next stream frame in a daemon thread
n = 0
while cap.isOpened():
n += 1
# _, self.imgs[index] = cap.read()
cap.grab()
if n == 4: # read every 4th frame
success, im = cap.retrieve()
self.imgs[index] = im if success else self.imgs[index] * 0
n = 0
time.sleep(1 / self.fps) # wait time
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
img0 = self.imgs.copy()
if cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
# Letterbox
img = [letterbox(x, self.img_size, auto=self.rect, stride=self.stride)[0] for x in img0]
# Stack
img = np.stack(img, 0)
# Convert
img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416
img = np.ascontiguousarray(img)
return self.sources, img, img0, None
def __len__(self):
return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years
def img2label_paths(img_paths):
# Define label paths as a function of image paths
sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings
return ['txt'.join(x.replace(sa, sb, 1).rsplit(x.split('.')[-1], 1)) for x in img_paths]
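# Minimal sketch of the /images/ -> /labels/ convention assumed by
# img2label_paths(); the dataset path below is hypothetical.
def _img2label_paths_example():
    lb = img2label_paths([os.path.join('coco', 'images', 'train', '0001.jpg')])
    return lb  # -> ['coco/labels/train/0001.txt'] (with OS-specific separators)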
class LoadImagesAndLabels(Dataset): # for training/testing
def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
cache_images=False, single_cls=False, stride=32, pad=0.0, prefix='', square=False, tidl_load=False, kpt_label=0):
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
self.rect = False # force-disable rectangular loading regardless of the rect argument
self.tidl_load = tidl_load
self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
self.mosaic_border = [-img_size // 2, -img_size // 2]
self.stride = stride
self.path = path
self.kpt_label = kpt_label
self.flip_index = [1, 0, 2, 4, 3] # face keypoints: index 4 is the nose
self.flip_index_plate = [1, 0, 3, 2] # license-plate keypoints
try:
f = [] # image files
for p in path if isinstance(path, list) else [path]:
p = Path(p) # os-agnostic
if p.is_dir(): # dir
f += glob.glob(str(p / '**' / '*.*'), recursive=True)
# f = list(p.rglob('**/*.*')) # pathlib
elif p.is_file(): # file
with open(p, 'r') as t:
t = t.read().strip().splitlines()
parent = str(p.parent) + os.sep
f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path
# f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib)
else:
raise Exception(f'{prefix}{p} does not exist')
self.img_files = [x.replace('/', os.sep).split(' ')[0] for x in f if x.split(' ')[0].split('.')[-1].lower() in img_formats]
sorted_index = [i[0] for i in sorted(enumerate(self.img_files), key=lambda x:x[1])]
self.img_files = [self.img_files[index] for index in sorted_index]
if self.tidl_load:
self.img_sizes = [x.replace('/', os.sep).split(' ')[2].split(',') for x in f if x.split(' ')[0].split('.')[-1].lower() in img_formats]
self.img_sizes = [self.img_sizes[index] for index in sorted_index]
self.img_sizes = [[int(dim_size) for dim_size in img_size] for img_size in self.img_sizes]
# self.img_files = sorted([x for x in f if x.suffix[1:].lower() in img_formats]) # pathlib
assert self.img_files, f'{prefix}No images found'
except Exception as e:
raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {help_url}')
# Check cache
self.label_files = img2label_paths(self.img_files) # labels
cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') # cached labels
if cache_path.is_file():
cache, exists = torch.load(cache_path), True # load
if cache['hash'] != get_hash(self.label_files + self.img_files) or 'version' not in cache: # changed
cache, exists = self.cache_labels(cache_path, prefix, self.kpt_label), False # re-cache
else:
cache, exists = self.cache_labels(cache_path, prefix, self.kpt_label), False # cache
# Display cache
nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupted, total
if exists:
d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted"
tqdm(None, desc=prefix + d, total=n, initial=n) # display cache results
assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {help_url}'
# Read cache
cache.pop('hash') # remove hash
cache.pop('version') # remove version
labels, shapes, self.segments = zip(*cache.values())
self.labels = list(labels)
self.shapes = np.array(shapes, dtype=np.float64)
self.img_files = list(cache.keys()) # update
self.label_files = img2label_paths(cache.keys()) # update
if single_cls:
for x in self.labels:
x[:, 0] = 0
n = len(shapes) # number of images
bi = np.floor(np.arange(n) / batch_size).astype(int) # batch index
nb = bi[-1] + 1 # number of batches
self.batch = bi # batch index of image
self.n = n
self.indices = range(n)
# Rectangular Training
if self.rect:
# Sort by aspect ratio
s = self.shapes # wh
ar = s[:, 1] / s[:, 0] # aspect ratio
irect = ar.argsort()
self.img_files = [self.img_files[i] for i in irect]
self.label_files = [self.label_files[i] for i in irect]
self.labels = [self.labels[i] for i in irect]
self.shapes = s[irect] # wh
ar = ar[irect]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
if not tidl_load:
self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride
else:
self.batch_shapes = (np.array(shapes) * img_size / stride + pad).astype(int) * stride
# Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
self.imgs = [None] * n
if cache_images:
gb = 0 # Gigabytes of cached images
self.img_hw0, self.img_hw = [None] * n, [None] * n
results = ThreadPool(8).imap(lambda x: load_image(*x), zip(repeat(self), range(n))) # 8 threads
pbar = tqdm(enumerate(results), total=n)
for i, x in pbar:
self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i)
gb += self.imgs[i].nbytes
pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)'
pbar.close()
def cache_labels(self, path=Path('./labels.cache'), prefix='', kpt_label=False):
# Cache dataset labels, check images and read shapes
x = {} # dict
nm, nf, ne, nc = 0, 0, 0, 0 # number missing, found, empty, corrupted
pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files))
for i, (im_file, lb_file) in enumerate(pbar):
try:
# verify images
im = Image.open(im_file)
im.verify() # PIL verify
shape = exif_size(im) # image size
segments = [] # instance segments
assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels'
assert im.format.lower() in img_formats, f'invalid image format {im.format}'
# verify labels
if os.path.isfile(lb_file):
nf += 1 # label found
with open(lb_file, 'r') as f:
l = [x.split() for x in f.read().strip().splitlines()]
if any([len(x) > 8 for x in l]) and not kpt_label: # is segment
classes = np.array([x[0] for x in l], dtype=np.float32)
segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in l] # (cls, xy1...)
l = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh)
l = np.array(l, dtype=np.float32)
if len(l):
assert (l >= 0).all(), 'negative labels'
if kpt_label:
# assert l.shape[1] == kpt_label*3 + 5, 'labels require {} columns each'.format(kpt_label*3+5)
assert l.shape[1] >= 5 + 2 * self.kpt_label, 'labels require at least {} columns each'.format(5 + 2 * self.kpt_label)
assert (l[:, 5::3] <= 1).all(), 'non-normalized or out of bounds coordinate labels'
assert (l[:, 6::3] <= 1).all(), 'non-normalized or out of bounds coordinate labels'
# print("l shape", l.shape)
# kpts = np.zeros((l.shape[0], kpt_label*2+5))
# for i in range(len(l)):
# kpt = np.delete(l[i,5:], np.arange(2, l.shape[1]-5, 3)) #remove the occlusion parameter from the GT
# kpts[i] = np.hstack((l[i, :5], kpt))
# l = kpts
assert l.shape[1] == kpt_label*2+5, 'labels require {} columns each after removing occlusion parameter'.format(kpt_label*2+5)
else:
assert l.shape[1] == 5, 'labels require 5 columns each'
assert (l[:, 1:5] <= 1).all(), 'non-normalized or out of bounds coordinate labels'
assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels'
else:
ne += 1 # label empty
l = np.zeros((0, kpt_label*2+5), dtype=np.float32) if kpt_label else np.zeros((0, 5), dtype=np.float32)
else:
nm += 1 # label missing
l = np.zeros((0,kpt_label*2+5), dtype=np.float32) if kpt_label else np.zeros((0, 5), dtype=np.float32)
x[im_file] = [l, shape, segments]
except Exception as e:
nc += 1
print(f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}')
pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels... " \
f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted"
pbar.close()
if nf == 0:
print(f'{prefix}WARNING: No labels found in {path}. See {help_url}')
x['hash'] = get_hash(self.label_files + self.img_files)
x['results'] = nf, nm, ne, nc, i + 1
x['version'] = 0.1 # cache version
try:
torch.save(x, path) # save for next time
logging.info(f'{prefix}New cache created: {path}')
except Exception as e:
logging.info(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}') # path not writeable
return x
def __len__(self):
return len(self.img_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
index = self.indices[index] # linear, shuffled, or image_weights
hyp = self.hyp
mosaic = self.mosaic and random.random() < hyp['mosaic']
if mosaic:
# Load mosaic
img, labels = load_mosaic(self, index)
shapes = None
# MixUp https://arxiv.org/pdf/1710.09412.pdf
if random.random() < hyp['mixup']:
img2, labels2 = load_mosaic(self, random.randint(0, self.n - 1))
r = np.random.beta(8.0, 8.0) # mixup ratio, alpha=beta=8.0
img = (img * r + img2 * (1 - r)).astype(np.uint8)
labels = np.concatenate((labels, labels2), 0)
else:
# Load image
img, (h0, w0), (h, w) = load_image(self, index)
if self.tidl_load:
h0, w0 = self.img_sizes[index][:-1] # override the original size for TIDL-loaded images
# Letterbox
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
before_shape = img.shape
letterbox1 = letterbox(img, shape, auto=False, scaleup=self.augment)
img, ratio, pad = letterbox1
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
labels = self.labels[index].copy()
if labels.size: # normalized xywh to pixel xyxy format
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1], kpt_label=self.kpt_label)
if self.augment:
# Augment imagespace
if not mosaic:
img, labels = random_perspective(img, labels,
degrees=hyp['degrees'],
translate=hyp['translate'],
scale=hyp['scale'],
shear=hyp['shear'],
perspective=hyp['perspective'],
kpt_label=self.kpt_label)
# Augment colorspace
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
# Apply cutouts
# if random.random() < 0.9:
# labels = cutout(img, labels)
nL = len(labels) # number of labels
if nL:
labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) # convert xyxy to xywh
labels[:, [2, 4]] /= img.shape[0] # normalized height 0-1
labels[:, [1, 3]] /= img.shape[1] # normalized width 0-1
if self.kpt_label:
labels[:, 6::2] /= img.shape[0] # normalized kpt heights 0-1
labels[:, 5::2] /= img.shape[1] # normalized kpt width 0-1
if self.augment:
# flip up-down
if random.random() < hyp['flipud']:
img = np.flipud(img)
if nL:
labels[:, 2] = 1 - labels[:, 2]
if self.kpt_label:
labels[:, 6::2] = (1 - labels[:, 6::2]) * (labels[:, 6::2] != 0)
# flip left-right
if random.random() < hyp['fliplr']:
img = np.fliplr(img)
if nL:
labels[:, 1] = 1 - labels[:, 1]
if self.kpt_label:
labels[:, 5::2] = (1 - labels[:, 5::2]) * (labels[:, 5::2] != 0)
# labels[:, 5::2] = labels[:, 5::2][:, self.flip_index]
# labels[:, 6::2] = labels[:, 6::2][:, self.flip_index]
labels[:, 5::2] = labels[:, 5::2][:, self.flip_index_plate]
labels[:, 6::2] = labels[:, 6::2][:, self.flip_index_plate]
#num_kpts = (labels.shape[1]-5)//2
labels_out = torch.zeros((nL, 6+2*self.kpt_label)) if self.kpt_label else torch.zeros((nL, 6))
if nL:
if self.kpt_label:
labels_out[:, 1:] = torch.from_numpy(labels)
else:
labels_out[:, 1:] = torch.from_numpy(labels[:, :5])
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
#if np.any(np.array(before_shape)>639):
#print("\nbefore:", before_shape, "after:", img.shape)
return torch.from_numpy(img), labels_out, self.img_files[index], shapes
@staticmethod
def collate_fn(batch):
img, label, path, shapes = zip(*batch) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img, 0), torch.cat(label, 0), path, shapes
@staticmethod
def collate_fn4(batch):
img, label, path, shapes = zip(*batch) # transposed
n = len(shapes) // 4
img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]
ho = torch.tensor([[0., 0, 0, 1, 0, 0]])
wo = torch.tensor([[0., 0, 1, 0, 0, 0]])
s = torch.tensor([[1, 1, .5, .5, .5, .5]]) # scale
for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW
i *= 4
if random.random() < 0.5:
im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2., mode='bilinear', align_corners=False)[
0].type(img[i].type())
l = label[i]
else:
im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2)
l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s
img4.append(im)
label4.append(l)
for i, l in enumerate(label4):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4
# Ancillary functions --------------------------------------------------------------------------------------------------
def load_image(self, index):
# loads 1 image from dataset, returns img, original hw, resized hw
img = self.imgs[index]
if img is None: # not cached
path = self.img_files[index]
img = cv2.imread(path) # BGR
assert img is not None, 'Image Not Found ' + path
h0, w0 = img.shape[:2] # orig hw
r = self.img_size / max(h0, w0) # resize image to img_size
if r != 1: # always resize down, only resize up if training with augmentation
# if r<1:
# print("resize ratio:", r)
interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized
else:
return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
dtype = img.dtype # uint8
x = np.arange(0, 256, dtype=np.int16)
lut_hue = ((x * r[0]) % 180).astype(dtype)
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
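# Usage sketch (illustrative only): augment_hsv() modifies the frame in place,
# so pass a copy when the original must be preserved. The gains shown are
# typical YOLOv5 hyperparameter values, not values read from this repo's config.
def _augment_hsv_example():
    im = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)  # dummy BGR frame
    augment_hsv(im, hgain=0.015, sgain=0.7, vgain=0.4)
    return im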
def hist_equalize(img, clahe=True, bgr=False):
# Equalize histogram on BGR image 'img' with img.shape(n,m,3) and range 0-255
yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV)
if clahe:
c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
yuv[:, :, 0] = c.apply(yuv[:, :, 0])
else:
yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram
return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV back to BGR/RGB
def load_mosaic(self, index):
# loads images in a 4-mosaic
labels4, segments4 = [], []
s = self.img_size
yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y
indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img4
if i == 0: # top left
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
# Labels
labels, segments = self.labels[index].copy(), self.segments[index].copy()
if labels.size:
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh, kpt_label=self.kpt_label) # normalized xywh to pixel xyxy format
segments = [xyn2xy(x, w, h, padw, padh) for x in segments]
labels4.append(labels)
segments4.extend(segments)
# Concat/clip labels
labels4 = np.concatenate(labels4, 0)
for x in (labels4[:, 1:], *segments4):
np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()
# img4, labels4 = replicate(img4, labels4) # replicate
# Augment
img4, labels4 = random_perspective(img4, labels4, segments4,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border,
kpt_label=self.kpt_label) # border to remove
return img4, labels4
def load_mosaic9(self, index):
# loads images in a 9-mosaic
labels9, segments9 = [], []
s = self.img_size
indices = [index] + random.choices(self.indices, k=8) # 8 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img9
if i == 0: # center
img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
h0, w0 = h, w
c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates
elif i == 1: # top
c = s, s - h, s + w, s
elif i == 2: # top right
c = s + wp, s - h, s + wp + w, s
elif i == 3: # right
c = s + w0, s, s + w0 + w, s + h
elif i == 4: # bottom right
c = s + w0, s + hp, s + w0 + w, s + hp + h
elif i == 5: # bottom
c = s + w0 - w, s + h0, s + w0, s + h0 + h
elif i == 6: # bottom left
c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h
elif i == 7: # left
c = s - w, s + h0 - h, s, s + h0
elif i == 8: # top left
c = s - w, s + h0 - hp - h, s, s + h0 - hp
padx, pady = c[:2]
x1, y1, x2, y2 = [max(x, 0) for x in c] # allocate coords
# Labels
labels, segments = self.labels[index].copy(), self.segments[index].copy()
if labels.size:
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format
segments = [xyn2xy(x, w, h, padx, pady) for x in segments]
labels9.append(labels)
segments9.extend(segments)
# Image
img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax]
hp, wp = h, w # height, width previous
# Offset
yc, xc = [int(random.uniform(0, s)) for _ in self.mosaic_border] # mosaic center x, y
img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]
# Concat/clip labels
labels9 = np.concatenate(labels9, 0)
labels9[:, [1, 3]] -= xc
labels9[:, [2, 4]] -= yc
c = np.array([xc, yc]) # centers
segments9 = [x - c for x in segments9]
for x in (labels9[:, 1:], *segments9):
np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()
# img9, labels9 = replicate(img9, labels9) # replicate
# Augment
img9, labels9 = random_perspective(img9, labels9, segments9,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border,
kpt_label=self.kpt_label) # border to remove
return img9, labels9
def replicate(img, labels):
# Replicate labels
h, w = img.shape[:2]
boxes = labels[:, 1:].astype(int)
x1, y1, x2, y2 = boxes.T
s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels)
for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices
x1b, y1b, x2b, y2b = boxes[i]
bh, bw = y2b - y1b, x2b - x1b
yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y
x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)
return img, labels
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
# Resize and pad image while meeting stride-multiple constraints
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup: # only scale down, do not scale up (for better test mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = (new_shape[1], new_shape[0])
ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
return img, ratio, (dw, dh)
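# Minimal sketch of letterbox(): fit a 480x640 frame into 640x640 while padding
# only to the next stride multiple (auto=True keeps the minimum rectangle).
def _letterbox_example():
    im = np.zeros((480, 640, 3), dtype=np.uint8)
    out, ratio, (dw, dh) = letterbox(im, new_shape=640, stride=32, auto=True)
    return out, ratio, (dw, dh)  # out stays 480x640 here since 480 % 32 == 0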
def random_perspective(img, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0,
border=(0, 0), kpt_label=False):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# targets = [cls, xyxy]
height = img.shape[0] + border[0] * 2 # shape(h,w,c)
width = img.shape[1] + border[1] * 2
# Center
C = np.eye(3)
C[0, 2] = -img.shape[1] / 2 # x translation (pixels)
C[1, 2] = -img.shape[0] / 2 # y translation (pixels)
# Perspective
P = np.eye(3)
P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)
P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)
# Rotation and Scale
R = np.eye(3)
a = random.uniform(-degrees, degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
s = random.uniform(1 - scale, 1 + scale)
# s = 2 ** random.uniform(-scale, scale)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
# Shear
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
# Translation
T = np.eye(3)
T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels)
T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels)
# Combined rotation matrix
M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT
if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
if perspective:
img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114))
else: # affine
img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
# Visualize
# import matplotlib.pyplot as plt
# ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
# ax[0].imshow(img[:, :, ::-1]) # base
# ax[1].imshow(img2[:, :, ::-1]) # warped
# Transform label coordinates
n = len(targets)
if n:
use_segments = any(x.any() for x in segments)
new = np.zeros((n, 4))
if use_segments: # warp segments
segments = resample_segments(segments) # upsample
for i, segment in enumerate(segments):
xy = np.ones((len(segment), 3))
xy[:, :2] = segment
xy = xy @ M.T # transform
xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine
# clip
new[i] = segment2box(xy, width, height)
else: # warp boxes
xy = np.ones((n * 4, 3))
xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = xy @ M.T # transform
xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# clip
new[:, [0, 2]] = new[:, [0, 2]].clip(0, width)
new[:, [1, 3]] = new[:, [1, 3]].clip(0, height)
if kpt_label:
xy_kpts = np.ones((n * kpt_label, 3))
xy_kpts[:, :2] = targets[:,5:].reshape(n*kpt_label, 2) # keypoint count comes from the kpt_label argument
xy_kpts = xy_kpts @ M.T # transform
xy_kpts = (xy_kpts[:, :2] / xy_kpts[:, 2:3] if perspective else xy_kpts[:, :2]).reshape(n, kpt_label*2) # perspective rescale or affine
xy_kpts[targets[:,5:]==0] = 0
x_kpts = xy_kpts[:, list(range(0,kpt_label*2,2))]
y_kpts = xy_kpts[:, list(range(1,kpt_label*2,2))]
x_kpts[np.logical_or.reduce((x_kpts < 0, x_kpts > width, y_kpts < 0, y_kpts > height))] = 0
y_kpts[np.logical_or.reduce((x_kpts < 0, x_kpts > width, y_kpts < 0, y_kpts > height))] = 0
xy_kpts[:, list(range(0, kpt_label*2, 2))] = x_kpts
xy_kpts[:, list(range(1, kpt_label*2, 2))] = y_kpts
# filter candidates
i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10)
targets = targets[i]
targets[:, 1:5] = new[i]
if kpt_label:
targets[:, 5:] = xy_kpts[i]
return img, targets
def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n)
# Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio
return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates
def cutout(image, labels):
# Applies image cutout augmentation https://arxiv.org/abs/1708.04552
h, w = image.shape[:2]
def bbox_ioa(box1, box2):
# Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
box2 = box2.transpose()
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
# Intersection area
inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
(np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
# box2 area
box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
# Intersection over box2 area
return inter_area / box2_area
# create random masks
scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction
for s in scales:
mask_h = random.randint(1, int(h * s))
mask_w = random.randint(1, int(w * s))
# box
xmin = max(0, random.randint(0, w) - mask_w // 2)
ymin = max(0, random.randint(0, h) - mask_h // 2)
xmax = min(w, xmin + mask_w)
ymax = min(h, ymin + mask_h)
# apply random color mask
image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
# return unobscured labels
if len(labels) and s > 0.03:
box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
labels = labels[ioa < 0.60] # remove >60% obscured labels
return labels
def create_folder(path='./new'):
# Create folder
if os.path.exists(path):
shutil.rmtree(path) # delete output folder
os.makedirs(path) # make new output folder
def flatten_recursive(path='../coco128'):
# Flatten a recursive directory by bringing all files to top level
new_path = Path(path + '_flat')
create_folder(new_path)
for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)):
shutil.copyfile(file, new_path / Path(file).name)
def extract_boxes(path='../coco128/'): # from utils.datasets import *; extract_boxes('../coco128')
# Convert detection dataset into classification dataset, with one directory per class
path = Path(path) # images dir
shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None # remove existing
files = list(path.rglob('*.*'))
n = len(files) # number of files
for im_file in tqdm(files, total=n):
if im_file.suffix[1:] in img_formats:
# image
im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB
h, w = im.shape[:2]
# labels
lb_file = Path(img2label_paths([str(im_file)])[0])
if Path(lb_file).exists():
with open(lb_file, 'r') as f:
lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels
for j, x in enumerate(lb):
c = int(x[0]) # class
f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename
if not f.parent.is_dir():
f.parent.mkdir(parents=True)
b = x[1:] * [w, h, w, h] # box
# b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.2 + 3 # pad
b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0), annotated_only=False):
""" Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
Usage: from utils.datasets import *; autosplit('../coco128')
Arguments
path: Path to images directory
weights: Train, val, test weights (list)
annotated_only: Only use images with an annotated txt file
"""
path = Path(path) # images dir
files = sum([list(path.rglob(f"*.{img_ext}")) for img_ext in img_formats], []) # image files only
n = len(files) # number of files
indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split
txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files
[(path / x).unlink() for x in txt if (path / x).exists()] # remove existing
print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only)
for i, img in tqdm(zip(indices, files), total=n):
if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label
with open(path / txt[i], 'a') as f:
f.write(str(img) + '\n') # add image to txt file
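# Usage sketch (illustrative only, never called): split a dataset 90/10/0 using
# only annotated images; '../coco128' is the function's own placeholder path.
def _autosplit_example():
    autosplit(path='../coco128', weights=(0.9, 0.1, 0.0), annotated_only=True)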
|
2301_81045437/yolov7_plate
|
utils/datasets.py
|
Python
|
unknown
| 49,848
|
"""Perform test request"""
import pprint
import requests
DETECTION_URL = "http://localhost:5000/v1/object-detection/yolov5s"
TEST_IMAGE = "zidane.jpg"
image_data = open(TEST_IMAGE, "rb").read()
response = requests.post(DETECTION_URL, files={"image": image_data}).json()
pprint.pprint(response)
|
2301_81045437/yolov7_plate
|
utils/flask_rest_api/example_request.py
|
Python
|
unknown
| 299
|
"""
Run a rest API exposing the yolov5s object detection model
"""
import argparse
import io
import torch
from PIL import Image
from flask import Flask, request
app = Flask(__name__)
DETECTION_URL = "/v1/object-detection/yolov5s"
@app.route(DETECTION_URL, methods=["POST"])
def predict():
if request.method != "POST":
return
if request.files.get("image"):
image_file = request.files["image"]
image_bytes = image_file.read()
img = Image.open(io.BytesIO(image_bytes))
results = model(img, size=640) # reduce size=320 for faster inference
return results.pandas().xyxy[0].to_json(orient="records")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Flask API exposing YOLOv5 model")
parser.add_argument("--port", default=5000, type=int, help="port number")
args = parser.parse_args()
model = torch.hub.load("ultralytics/yolov5", "yolov5s", force_reload=True) # force_reload to recache
app.run(host="0.0.0.0", port=args.port) # debug=True causes Restarting with stat
|
2301_81045437/yolov7_plate
|
utils/flask_rest_api/restapi.py
|
Python
|
unknown
| 1,078
|
# YOLOv5 general utils
import glob
import logging
import math
import os
import platform
import random
import re
import subprocess
import time
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
import cv2
import numpy as np
import pandas as pd
import torch
import torchvision
import yaml
from utils.google_utils import gsutil_getsize
from utils.metrics import fitness
from utils.torch_utils import init_torch_seeds
# Settings
torch.set_printoptions(linewidth=320, precision=5, profile='long')
np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5
pd.options.display.max_columns = 10
cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader)
os.environ['NUMEXPR_MAX_THREADS'] = str(min(os.cpu_count(), 8)) # NumExpr max threads
def set_logging(rank=-1, verbose=True):
logging.basicConfig(
format="%(message)s",
level=logging.INFO if (verbose and rank in [-1, 0]) else logging.WARN)
def init_seeds(seed=0):
# Initialize random number generator (RNG) seeds
random.seed(seed)
np.random.seed(seed)
init_torch_seeds(seed)
def get_latest_run(search_dir='.'):
# Return path to most recent 'last.pt' in /runs (i.e. to --resume from)
last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True)
return max(last_list, key=os.path.getctime) if last_list else ''
def isdocker():
# Is environment a Docker container
return Path('/workspace').exists() # or Path('/.dockerenv').exists()
def emojis(s=''):
# Return platform-dependent emoji-safe version of string
return s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s
def file_size(file):
# Return file size in MB
return Path(file).stat().st_size / 1e6
def check_online():
# Check internet connectivity
import socket
try:
socket.create_connection(("1.1.1.1", 443), 5) # check host accessibility
return True
except OSError:
return False
def check_git_status():
# Recommend 'git pull' if code is out of date
print(colorstr('github: '), end='')
try:
assert Path('.git').exists(), 'skipping check (not a git repository)'
assert not isdocker(), 'skipping check (Docker image)'
assert check_online(), 'skipping check (offline)'
cmd = 'git fetch && git config --get remote.origin.url'
url = subprocess.check_output(cmd, shell=True).decode().strip().rstrip('.git') # github repo url
branch = subprocess.check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out
n = int(subprocess.check_output(f'git rev-list {branch}..origin/master --count', shell=True)) # commits behind
if n > 0:
s = f"⚠️ WARNING: code is out of date by {n} commit{'s' * (n > 1)}. " \
f"Use 'git pull' to update or 'git clone {url}' to download latest."
else:
s = f'up to date with {url} ✅'
print(emojis(s)) # emoji-safe
except Exception as e:
print(e)
def check_requirements(requirements='requirements.txt', exclude=()):
# Check installed dependencies meet requirements (pass *.txt file or list of packages)
import pkg_resources as pkg
prefix = colorstr('red', 'bold', 'requirements:')
if isinstance(requirements, (str, Path)): # requirements.txt file
file = Path(requirements)
if not file.exists():
print(f"{prefix} {file.resolve()} not found, check failed.")
return
requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude]
else: # list or tuple of packages
requirements = [x for x in requirements if x not in exclude]
n = 0 # number of packages updates
for r in requirements:
try:
pkg.require(r)
except Exception as e: # DistributionNotFound or VersionConflict if requirements not met
n += 1
print(f"{prefix} {r} not found and is required by YOLOv5, attempting auto-update...")
print(subprocess.check_output(f"pip install '{r}'", shell=True).decode())
if n: # if packages updated
source = file.resolve() if 'file' in locals() else requirements
s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \
f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n"
print(emojis(s)) # emoji-safe
def check_img_size(img_size, s=32):
# Verify img_size is a multiple of stride s
new_size = make_divisible(img_size, int(s)) # ceil gs-multiple
if new_size != img_size:
print('WARNING: --img-size %g must be a multiple of max stride %g, updating to %g' % (img_size, s, new_size))
return new_size
def check_imshow():
# Check if environment supports image displays
try:
assert not isdocker(), 'cv2.imshow() is disabled in Docker environments'
cv2.imshow('test', np.zeros((1, 1, 3)))
cv2.waitKey(1)
cv2.destroyAllWindows()
cv2.waitKey(1)
return True
except Exception as e:
print(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}')
return False
def check_file(file):
# Search for file if not found
if Path(file).is_file() or file == '':
return file
else:
files = glob.glob('./**/' + file, recursive=True) # find file
assert len(files), f'File Not Found: {file}' # assert file was found
assert len(files) == 1, f"Multiple files match '{file}', specify exact path: {files}" # assert unique
return files[0] # return file
def check_dataset(data_dict):
# Download dataset if not found locally
val, s = data_dict.get('val'), data_dict.get('download')
if val and len(val):
val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path
if not all(x.exists() for x in val):
print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()])
if s and len(s): # download script
if s.startswith('http') and s.endswith('.zip'): # URL
f = Path(s).name # filename
print(f'Downloading {s} ...')
torch.hub.download_url_to_file(s, f)
r = os.system(f'unzip -q {f} -d ../ && rm {f}') # unzip
elif s.startswith('bash '): # bash script
print(f'Running {s} ...')
r = os.system(s)
else: # python script
r = exec(s) # return None
print('Dataset autodownload %s\n' % ('success' if r in (0, None) else 'failure')) # print result
else:
raise Exception('Dataset not found.')
def download(url, dir='.', threads=1):
# Multi-threaded file download and unzip function
def download_one(url, dir):
# Download 1 file
f = dir / Path(url).name # filename
if not f.exists():
print(f'Downloading {url} to {f}...')
torch.hub.download_url_to_file(url, f, progress=True) # download
if f.suffix in ('.zip', '.gz'):
print(f'Unzipping {f}...')
if f.suffix == '.zip':
os.system(f'unzip -qo {f} -d {dir} && rm {f}') # unzip -quiet -overwrite
elif f.suffix == '.gz':
os.system(f'tar xfz {f} --directory {f.parent} && rm {f}') # unzip
dir = Path(dir)
dir.mkdir(parents=True, exist_ok=True) # make directory
if threads > 1:
ThreadPool(threads).imap(lambda x: download_one(*x), zip(url, repeat(dir))) # multi-threaded
else:
for u in tuple(url) if isinstance(url, str) else url:
download_one(u, dir)
def make_divisible(x, divisor):
# Returns x evenly divisible by divisor
return math.ceil(x / divisor) * divisor
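# Quick sketch of make_divisible(): rounds up to the nearest divisor multiple,
# which is how check_img_size() above derives a valid --img-size.
def _make_divisible_example():
    assert make_divisible(100, 32) == 128
    assert make_divisible(640, 32) == 640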
def clean_str(s):
# Cleans a string by replacing special characters with underscore _
return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s)
def one_cycle(y1=0.0, y2=1.0, steps=100):
# lambda function for sinusoidal ramp from y1 to y2
return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1
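# Sketch of one_cycle(): the returned lambda is the cosine ramp typically passed
# to torch.optim.lr_scheduler.LambdaLR; lf(0) == y1 and lf(steps) == y2.
def _one_cycle_example():
    lf = one_cycle(1.0, 0.2, steps=100)
    assert abs(lf(0) - 1.0) < 1e-9 and abs(lf(100) - 0.2) < 1e-9
    return lf(50)  # midpoint, (1.0 + 0.2) / 2 == 0.6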
def colorstr(*input):
# Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world')
*args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string
colors = {'black': '\033[30m', # basic colors
'red': '\033[31m',
'green': '\033[32m',
'yellow': '\033[33m',
'blue': '\033[34m',
'magenta': '\033[35m',
'cyan': '\033[36m',
'white': '\033[37m',
'bright_black': '\033[90m', # bright colors
'bright_red': '\033[91m',
'bright_green': '\033[92m',
'bright_yellow': '\033[93m',
'bright_blue': '\033[94m',
'bright_magenta': '\033[95m',
'bright_cyan': '\033[96m',
'bright_white': '\033[97m',
'end': '\033[0m', # misc
'bold': '\033[1m',
'underline': '\033[4m'}
return ''.join(colors[x] for x in args) + f'{string}' + colors['end']
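# Sketch of colorstr(): wraps a string in ANSI escape codes; with a single
# argument the prefix defaults to bold blue.
def _colorstr_example():
    return colorstr('red', 'bold', 'requirements:')  # '\x1b[31m\x1b[1mrequirements:\x1b[0m'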
def labels_to_class_weights(labels, nc=80):
# Get class weights (inverse frequency) from training labels
if labels[0] is None: # no labels loaded
return torch.Tensor()
labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO
classes = labels[:, 0].astype(int) # labels = [class xywh]
weights = np.bincount(classes, minlength=nc) # occurrences per class
# Prepend gridpoint count (for uCE training)
# gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum() # gridpoints per image
# weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5 # prepend gridpoints to start
weights[weights == 0] = 1 # replace empty bins with 1
weights = 1 / weights # number of targets per class
weights /= weights.sum() # normalize
return torch.from_numpy(weights)
def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
# Produces image weights based on class_weights and image contents
class_counts = np.array([np.bincount(x[:, 0].astype(int), minlength=nc) for x in labels])
image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)
# index = random.choices(range(n), weights=image_weights, k=1) # weight image sample
return image_weights
def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper)
# https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
# a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n')
# b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
# x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco
# x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet
x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,
35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
return x
def xyxy2xywh(x):
# Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center
y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center
y[:, 2] = x[:, 2] - x[:, 0] # width
y[:, 3] = x[:, 3] - x[:, 1] # height
return y
def xywh2xyxy(x):
# Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x
y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y
y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x
y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y
return y
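# Sketch: xyxy2xywh() and xywh2xyxy() are exact inverses on nx4 boxes.
def _box_convert_example():
    xyxy = np.array([[10., 20., 110., 220.]])  # x1 y1 x2 y2
    xywh = xyxy2xywh(xyxy)                     # -> [[60., 120., 100., 200.]]
    assert np.allclose(xywh2xyxy(xywh), xyxy)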
def xywh2xyxy_export(cx,cy,w,h):
#This function is used while exporting ONNX models
# Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
#y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
halfw = w/2
halfh = h/2
xmin = cx - halfw # top left x
ymin = cy - halfh # top left y
xmax = cx + halfw # bottom right x
ymax = cy + halfh # bottom right y
return torch.cat((xmin, ymin, xmax, ymax), 1)
def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0, kpt_label=False):
# Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
# it does the same operation as above for the key-points
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw # top left x
y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh # top left y
y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw # bottom right x
y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh # bottom right y
if kpt_label:
num_kpts = (x.shape[1]-4)//2
for kpt in range(num_kpts):
for kpt_instance in range(y.shape[0]):
if y[kpt_instance, 2 * kpt + 4]!=0:
y[kpt_instance, 2*kpt+4] = w * y[kpt_instance, 2*kpt+4] + padw
if y[kpt_instance, 2 * kpt + 1 + 4] !=0:
y[kpt_instance, 2*kpt+1+4] = h * y[kpt_instance, 2*kpt+1+4] + padh
return y
def xyn2xy(x, w=640, h=640, padw=0, padh=0):
# Convert normalized segments into pixel segments, shape (n,2)
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = w * x[:, 0] + padw # top left x
y[:, 1] = h * x[:, 1] + padh # top left y
return y
def segment2box(segment, width=640, height=640):
# Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy)
x, y = segment.T # segment xy
inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height)
x, y, = x[inside], y[inside]
return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4)) # xyxy
def segments2boxes(segments):
# Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) to (cls, xywh)
boxes = []
for s in segments:
x, y = s.T # segment xy
boxes.append([x.min(), y.min(), x.max(), y.max()]) # cls, xyxy
return xyxy2xywh(np.array(boxes)) # cls, xywh
def resample_segments(segments, n=1000):
# Up-sample an (n,2) segment
for i, s in enumerate(segments):
x = np.linspace(0, len(s) - 1, n)
xp = np.arange(len(s))
segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)]).reshape(2, -1).T # segment xy
return segments
def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None, kpt_label=False, step=2):
# Rescale coords (xyxy) from img1_shape to img0_shape
if ratio_pad is None: # calculate from img0_shape
gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new
pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding
else:
gain = ratio_pad[0]
pad = ratio_pad[1]
if isinstance(gain, (list, tuple)):
gain = gain[0]
if not kpt_label:
coords[:, [0, 2]] -= pad[0] # x padding
coords[:, [1, 3]] -= pad[1] # y padding
coords[:, [0, 2]] /= gain
coords[:, [1, 3]] /= gain
clip_coords(coords[:, 0:4], img0_shape)
#coords[:, 0:4] = coords[:, 0:4].round()
else:
coords[:, 0::step] -= pad[0] # x padding
coords[:, 1::step] -= pad[1] # y padding
coords[:, 0::step] /= gain
coords[:, 1::step] /= gain
clip_coords(coords, img0_shape, step=step)
#coords = coords.round()
return coords
def clip_coords(boxes, img_shape, step=2):
# Clip bounding xyxy bounding boxes to image shape (height, width)
boxes[:, 0::step].clamp_(0, img_shape[1]) # x1
boxes[:, 1::step].clamp_(0, img_shape[0]) # y1
def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7):
# Returns the IoU of box1 to box2. box1 is 4, box2 is nx4
box2 = box2.T
# Get the coordinates of bounding boxes
if x1y1x2y2: # x1, y1, x2, y2 = box1
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
else: # transform from xywh to xyxy
b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2
# Intersection area
inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \
(torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)
# Union Area
w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
union = w1 * h1 + w2 * h2 - inter + eps
iou = inter / union
if GIoU or DIoU or CIoU:
cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width
ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height
if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared
rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 +
(b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center distance squared
if DIoU:
return iou - rho2 / c2 # DIoU
elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
with torch.no_grad():
alpha = v / (v - iou + (1 + eps))
return iou - (rho2 / c2 + v * alpha) # CIoU
else: # GIoU https://arxiv.org/pdf/1902.09630.pdf
c_area = cw * ch + eps # convex area
return iou - (c_area - union) / c_area # GIoU
else:
return iou # IoU
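# Sketch of bbox_iou(): plain IoU between one box and a set of xyxy boxes;
# two equal boxes overlapping by half their width score 1/3.
def _bbox_iou_example():
    box1 = torch.tensor([0., 0., 2., 2.])
    box2 = torch.tensor([[1., 0., 3., 2.]])  # shifted right by half a width
    return bbox_iou(box1, box2)               # ~0.3333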
def box_iou(box1, box2):
# https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
"""
Return intersection-over-union (Jaccard index) of boxes.
Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
Arguments:
box1 (Tensor[N, 4])
box2 (Tensor[M, 4])
Returns:
iou (Tensor[N, M]): the NxM matrix containing the pairwise
IoU values for every element in boxes1 and boxes2
"""
def box_area(box):
# box = 4xn
return (box[2] - box[0]) * (box[3] - box[1])
area1 = box_area(box1.T)
area2 = box_area(box2.T)
# inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
return inter / (area1[:, None] + area2 - inter) # iou = inter / (area1 + area2 - inter)
def wh_iou(wh1, wh2):
# Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2
wh1 = wh1[:, None] # [N,1,2]
wh2 = wh2[None] # [1,M,2]
inter = torch.min(wh1, wh2).prod(2) # [N,M]
return inter / (wh1.prod(2) + wh2.prod(2) - inter) # iou = inter / (area1 + area2 - inter)
def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,
labels=(), kpt_label=5, nc=None):
"""Runs Non-Maximum Suppression (NMS) on inference results
Returns:
list of detections, one (n, 6) tensor per image [xyxy, conf, cls]
"""
if nc is None:
nc = prediction.shape[2] - 5 if not kpt_label else prediction.shape[2] - 5 - kpt_label * 3 # number of classes
xc = prediction[..., 4] > conf_thres # candidates
# Settings
min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height
#max_det = 300 # maximum number of detections per image
max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()
time_limit = 1000.0 # seconds to quit after
redundant = True # require redundant detections
multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img)
merge = False # use merge-NMS
t = time.time()
output = [torch.zeros((0,5+nc), device=prediction.device)] * prediction.shape[0]
for xi, x in enumerate(prediction): # image index, image inference
# Apply constraints
# x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height
x = x[xc[xi]] # confidence
# Cat apriori labels if autolabelling
if labels and len(labels[xi]):
l = labels[xi]
v = torch.zeros((len(l), nc + 5), device=x.device)
v[:, :4] = l[:, 1:5] # box
v[:, 4] = 1.0 # conf
v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls
x = torch.cat((x, v), 0)
# If none remain process next image
if not x.shape[0]:
continue
# Compute conf
x[:, 5:5+nc] *= x[:, 4:5] # conf = obj_conf * cls_conf
# Box (center x, center y, width, height) to (x1, y1, x2, y2)
box = xywh2xyxy(x[:, :4])
# Detections matrix nx6 (xyxy, conf, cls)
if multi_label:
i, j = (x[:, 5:5+nc] > conf_thres).nonzero(as_tuple=False).T
x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float(), x[i, 5 + nc:]), 1)
else: # best class only
if not kpt_label:
conf, j = x[:, 5:].max(1, keepdim=True)
x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]
else:
kpts = x[:, 5+nc:]
conf, j = x[:, 5:5+nc].max(1, keepdim=True)
x = torch.cat((box, conf, j.float(), kpts), 1)[conf.view(-1) > conf_thres]
# Filter by class
if classes is not None:
x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]
# Apply finite constraint
# if not torch.isfinite(x).all():
# x = x[torch.isfinite(x).all(1)]
# Check shape
n = x.shape[0] # number of boxes
if not n: # no boxes
continue
elif n > max_nms: # excess boxes
x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence
# Batched NMS
c = x[:, 5:6] * (0 if agnostic else max_wh) # classes
boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores
i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS
#if i.shape[0] > max_det: # limit detections
# i = i[:max_det]
if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean)
# update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix
weights = iou * scores[None] # box weights
x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes
if redundant:
i = i[iou.sum(1) > 1] # require redundancy
output[xi] = x[i]
if (time.time() - t) > time_limit:
print(f'WARNING: NMS time limit {time_limit}s exceeded')
break # time limit exceeded
return output
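# Hedged sketch of non_max_suppression() on random predictions with no
# keypoints (kpt_label=0). Scores are random, so the output length varies;
# this only demonstrates the expected tensor shapes.
def _nms_example():
    pred = torch.rand(1, 100, 7)  # 1 image, 100 boxes: cx cy w h obj cls0 cls1
    pred[..., :4] *= 640          # move boxes into pixel space
    out = non_max_suppression(pred, conf_thres=0.25, iou_thres=0.45, kpt_label=0)
    return out[0]                 # detections for image 0: xyxy, conf, cls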
def non_max_suppression_export(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,
kpt_label=5, nc=None, labels=()):
"""Runs Non-Maximum Suppression (NMS) on inference results
Returns:
list of detections, one (n, 6) tensor per image [xyxy, conf, cls]
"""
if nc is None:
nc = prediction.shape[2] - 5 if not kpt_label else prediction.shape[2] - 5 - kpt_label * 3 # number of classes
min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height
xc = prediction[..., 4] > conf_thres # candidates
output = [torch.zeros((0, kpt_label*3+6), device=prediction.device)] * prediction.shape[0]
for xi, x in enumerate(prediction): # image index, image inference
x = x[xc[xi]] # confidence
# Compute conf
cx, cy, w, h = x[:,0:1], x[:,1:2], x[:,2:3], x[:,3:4]
obj_conf = x[:, 4:5]
cls_conf = x[:, 5:5+nc]
kpts = x[:, 6:]
cls_conf = obj_conf * cls_conf # conf = obj_conf * cls_conf
# Box (center x, center y, width, height) to (x1, y1, x2, y2)
box = xywh2xyxy_export(cx, cy, w, h)
conf, j = cls_conf.max(1, keepdim=True)
x = torch.cat((box, conf, j.float(), kpts), 1)[conf.view(-1) > conf_thres]
c = x[:, 5:6] * (0 if agnostic else max_wh) # classes
boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores
i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS
output[xi] = x[i]
return output
def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer()
# Strip optimizer from 'f' to finalize training, optionally save as 's'
x = torch.load(f, map_location=torch.device('cpu'))
if x.get('ema'):
x['model'] = x['ema'] # replace model with ema
for k in 'optimizer', 'training_results', 'wandb_id', 'ema', 'updates': # keys
x[k] = None
x['epoch'] = -1
x['model'].half() # to FP16
for p in x['model'].parameters():
p.requires_grad = False
torch.save(x, s or f)
mb = os.path.getsize(s or f) / 1E6 # filesize
print(f"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB")
def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''):
# Print mutation results to evolve.txt (for use with train.py --evolve)
a = '%10s' * len(hyp) % tuple(hyp.keys()) # hyperparam keys
b = '%10.3g' * len(hyp) % tuple(hyp.values()) # hyperparam values
c = '%10.4g' * len(results) % results # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3)
print('\n%s\n%s\nEvolved fitness: %s\n' % (a, b, c))
if bucket:
url = 'gs://%s/evolve.txt' % bucket
if gsutil_getsize(url) > (os.path.getsize('evolve.txt') if os.path.exists('evolve.txt') else 0):
os.system('gsutil cp %s .' % url) # download evolve.txt if larger than local
with open('evolve.txt', 'a') as f: # append result
f.write(c + b + '\n')
x = np.unique(np.loadtxt('evolve.txt', ndmin=2), axis=0) # load unique rows
x = x[np.argsort(-fitness(x))] # sort
np.savetxt('evolve.txt', x, '%10.3g') # save sort by fitness
# Save yaml
for i, k in enumerate(hyp.keys()):
hyp[k] = float(x[0, i + 7])
with open(yaml_file, 'w') as f:
results = tuple(x[0, :7])
c = '%10.4g' * len(results) % results # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3)
f.write('# Hyperparameter Evolution Results\n# Generations: %g\n# Metrics: ' % len(x) + c + '\n\n')
yaml.safe_dump(hyp, f, sort_keys=False)
if bucket:
os.system('gsutil cp evolve.txt %s gs://%s' % (yaml_file, bucket)) # upload
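# --- Illustrative usage (not part of the original file; values hypothetical):
# each evolve.txt row stores the 7 fixed-width result columns first, then the
# hyperparameter values, which is why the code above reads metrics from x[0, :7]
# and hyperparameters from x[0, 7:]. Writes evolve.txt and the yaml, so left commented.
# hyp = {'lr0': 0.01, 'momentum': 0.937}
# results = (0.70, 0.60, 0.65, 0.40, 0.05, 0.04, 0.03)  # P, R, mAP@.5, mAP@.5:.95, 3 losses
# print_mutation(hyp, results)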
def apply_classifier(x, model, img, im0):
# Apply a second stage classifier to yolo outputs
im0 = [im0] if isinstance(im0, np.ndarray) else im0
for i, d in enumerate(x): # per image
if d is not None and len(d):
d = d.clone()
# Reshape and pad cutouts
b = xyxy2xywh(d[:, :4]) # boxes
b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # rectangle to square
b[:, 2:] = b[:, 2:] * 1.3 + 30 # pad
d[:, :4] = xywh2xyxy(b).long()
# Rescale boxes from img_size to im0 size
scale_coords(img.shape[2:], d[:, :4], im0[i].shape)
# Classes
pred_cls1 = d[:, 5].long()
ims = []
for j, a in enumerate(d): # per item
cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
im = cv2.resize(cutout, (224, 224)) # BGR
# cv2.imwrite('test%i.jpg' % j, cutout)
                im = im[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x224x224
im = np.ascontiguousarray(im, dtype=np.float32) # uint8 to float32
im /= 255.0 # 0 - 255 to 0.0 - 1.0
ims.append(im)
pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1) # classifier prediction
x[i] = x[i][pred_cls1 == pred_cls2] # retain matching class detections
return x
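# --- Illustrative sketch (not part of the original file): the crop preprocessing
# apply_classifier performs, run on a synthetic BGR crop. Shows the
# resize -> BGR-to-RGB -> HWC-to-CHW -> [0, 1] pipeline the second-stage
# classifier expects. Assumes this module's numpy and cv2 imports.
def _demo_classifier_preprocess():
    cutout = np.random.randint(0, 255, (80, 60, 3), dtype=np.uint8)  # fake BGR crop
    im = cv2.resize(cutout, (224, 224))               # classifier input size
    im = im[:, :, ::-1].transpose(2, 0, 1)            # BGR to RGB, HWC to CHW
    im = np.ascontiguousarray(im, dtype=np.float32) / 255.0  # uint8 to float in [0, 1]
    return im.shape                                   # (3, 224, 224)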
def save_one_box(xyxy, im, file='image.jpg', gain=1.02, pad=10, square=False, BGR=False):
# Save an image crop as {file} with crop size multiplied by {gain} and padded by {pad} pixels
xyxy = torch.tensor(xyxy).view(-1, 4)
b = xyxy2xywh(xyxy) # boxes
if square:
b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # attempt rectangle to square
b[:, 2:] = b[:, 2:] * gain + pad # box wh * gain + pad
xyxy = xywh2xyxy(b).long()
clip_coords(xyxy, im.shape)
crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2])]
cv2.imwrite(str(increment_path(file, mkdir=True).with_suffix('.jpg')), crop if BGR else crop[..., ::-1])
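# --- Illustrative usage (not part of the original file; path hypothetical, writes
# a file when run): gain and pad enlarge the crop so the object is not cut flush
# at the detection edges; increment_path() below avoids overwriting earlier crops.
def _demo_save_one_box():
    im = np.zeros((480, 640, 3), dtype=np.uint8)      # fake RGB frame
    save_one_box([100, 100, 200, 200], im, file='crops/demo.jpg', BGR=False)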
def increment_path(path, exist_ok=False, sep='', mkdir=False):
# Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc.
path = Path(path) # os-agnostic
if path.exists() and not exist_ok:
suffix = path.suffix
path = path.with_suffix('')
dirs = glob.glob(f"{path}{sep}*") # similar paths
        matches = [re.search(rf"{path.stem}{sep}(\d+)", d) for d in dirs]
i = [int(m.groups()[0]) for m in matches if m] # indices
n = max(i) + 1 if i else 2 # increment number
path = Path(f"{path}{sep}{n}{suffix}") # update path
dir = path if path.suffix == '' else path.parent # directory
if not dir.exists() and mkdir:
dir.mkdir(parents=True, exist_ok=True) # make directory
return path
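# --- Illustrative usage (not part of the original file; creates directories on
# disk when run): with 'runs/exp' already present, the next call yields
# 'runs/exp2', then 'runs/exp3' -- the first duplicate starts at 2, not 1.
def _demo_increment_path():
    p1 = increment_path('runs/exp', mkdir=True)       # 'runs/exp' if it was free
    p2 = increment_path('runs/exp', mkdir=True)       # 'runs/exp2'
    return p1, p2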
|
2301_81045437/yolov7_plate
|
utils/general.py
|
Python
|
unknown
| 30,697
|
# Google utils: https://cloud.google.com/storage/docs/reference/libraries
import os
import platform
import subprocess
import time
from pathlib import Path
import requests
import torch
def gsutil_getsize(url=''):
# gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du
s = subprocess.check_output(f'gsutil du {url}', shell=True).decode('utf-8')
    return int(s.split(' ')[0]) if len(s) else 0  # bytes (int() instead of eval() on subprocess output)
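# --- Illustrative usage (not part of the original file; bucket name hypothetical):
# the size is returned in bytes (0 for a missing object), so it compares directly
# against os.path.getsize() of a local file when deciding which copy is newer.
# remote_is_bigger = gsutil_getsize('gs://my-bucket/evolve.txt') > os.path.getsize('evolve.txt')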
def attempt_download(file, repo='ultralytics/yolov5'):
# Attempt file download if does not exist
file = Path(str(file).strip().replace("'", ''))
if not file.exists():
try:
response = requests.get(f'https://api.github.com/repos/{repo}/releases/latest').json() # github api
assets = [x['name'] for x in response['assets']] # release assets, i.e. ['yolov5s.pt', 'yolov5m.pt', ...]
tag = response['tag_name'] # i.e. 'v1.0'
    except Exception:  # fallback plan
assets = ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt',
'yolov5s6.pt', 'yolov5m6.pt', 'yolov5l6.pt', 'yolov5x6.pt']
try:
tag = subprocess.check_output('git tag', shell=True, stderr=subprocess.STDOUT).decode().split()[-1]
            except Exception:
tag = 'v5.0' # current release
name = file.name
if name in assets:
msg = f'{file} missing, try downloading from https://github.com/{repo}/releases/'
redundant = False # second download option
try: # GitHub
url = f'https://github.com/{repo}/releases/download/{tag}/{name}'
print(f'Downloading {url} to {file}...')
torch.hub.download_url_to_file(url, file)
assert file.exists() and file.stat().st_size > 1E6 # check
except Exception as e: # GCP
print(f'Download error: {e}')
assert redundant, 'No secondary mirror'
url = f'https://storage.googleapis.com/{repo}/ckpt/{name}'
print(f'Downloading {url} to {file}...')
os.system(f'curl -L {url} -o {file}') # torch.hub.download_url_to_file(url, weights)
finally:
if not file.exists() or file.stat().st_size < 1E6: # check
file.unlink(missing_ok=True) # remove partial downloads
print(f'ERROR: Download failure: {msg}')
print('')
return
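# --- Illustrative usage (not part of the original file): attempt_download is a
# no-op when the file already exists, and only fetches names that appear among
# the release assets; any other name simply returns.
# attempt_download('yolov5s.pt')            # pulled from the GitHub release if missing
# attempt_download('yolov5s6.pt')           # also a known release asset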
def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zip'):
# Downloads a file from Google Drive. from yolov5.utils.google_utils import *; gdrive_download()
t = time.time()
file = Path(file)
cookie = Path('cookie') # gdrive cookie
print(f'Downloading https://drive.google.com/uc?export=download&id={id} as {file}... ', end='')
file.unlink(missing_ok=True) # remove existing file
cookie.unlink(missing_ok=True) # remove existing cookie
# Attempt file download
out = "NUL" if platform.system() == "Windows" else "/dev/null"
os.system(f'curl -c ./cookie -s -L "drive.google.com/uc?export=download&id={id}" > {out}')
if os.path.exists('cookie'): # large file
s = f'curl -Lb ./cookie "drive.google.com/uc?export=download&confirm={get_token()}&id={id}" -o {file}'
else: # small file
s = f'curl -s -L -o {file} "drive.google.com/uc?export=download&id={id}"'
r = os.system(s) # execute, capture return
cookie.unlink(missing_ok=True) # remove existing cookie
# Error check
if r != 0:
file.unlink(missing_ok=True) # remove partial
print('Download error ') # raise Exception('Download error')
return r
# Unzip if archive
if file.suffix == '.zip':
print('unzipping... ', end='')
os.system(f'unzip -q {file}') # unzip
file.unlink() # remove zip to free space
print(f'Done ({time.time() - t:.1f}s)')
return r
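# --- Illustrative usage (not part of the original file): gdrive_download returns
# the curl exit code (0 on success) and unzips *.zip archives in place. The id
# below is the function's own default; any other id is hypothetical.
# r = gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zip')
# assert r == 0, 'download failed'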
def get_token(cookie="./cookie"):
with open(cookie) as f:
for line in f:
if "download" in line:
return line.split()[-1]
return ""
# def upload_blob(bucket_name, source_file_name, destination_blob_name):
# # Uploads a file to a bucket
# # https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python
#
# storage_client = storage.Client()
# bucket = storage_client.get_bucket(bucket_name)
# blob = bucket.blob(destination_blob_name)
#
# blob.upload_from_filename(source_file_name)
#
# print('File {} uploaded to {}.'.format(
# source_file_name,
# destination_blob_name))
#
#
# def download_blob(bucket_name, source_blob_name, destination_file_name):
# # Uploads a blob from a bucket
# storage_client = storage.Client()
# bucket = storage_client.get_bucket(bucket_name)
# blob = bucket.blob(source_blob_name)
#
# blob.download_to_filename(destination_file_name)
#
# print('Blob {} downloaded to {}.'.format(
# source_blob_name,
# destination_file_name))
|
2301_81045437/yolov7_plate
|
utils/google_utils.py
|
Python
|
unknown
| 5,059
|