VWED_server/services/map_data_service.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Map data push service.
Handles persistence of operate points and storage areas when map data is pushed.
"""
import uuid
from typing import List, Dict, Any, Optional
from sqlalchemy.orm import Session
from sqlalchemy import and_
from data.models import StorageArea, OperatePoint, OperatePointLayer, StorageAreaType
from routes.model.map_model import (
    MapDataPushRequest, MapDataPushResponse, MapDataQueryRequest,
    MapDataQueryResponse, StorageAreaData, OperatePointData,
    OperatePointLayerData, StorageAreaTypeEnum
)
from utils.logger import get_logger
from config.settings import settings

logger = get_logger("services.map_data_service")


class MapDataService:
    """Map data push service."""

    @staticmethod
    def push_map_data(db: Session, request: MapDataPushRequest) -> MapDataPushResponse:
        """
        Push map data for a scene.

        Args:
            db: database session
            request: map data push request

        Returns:
            MapDataPushResponse: push result
        """
        try:
            # Initialize counters
            storage_areas_count = 0
            operate_points_count = 0
            layers_count = 0

            # First check whether the database already holds data for this scene
            existing_storage_areas = db.query(StorageArea).filter(
                and_(
                    StorageArea.scene_id == request.scene_id,
                    StorageArea.is_deleted == False
                )
            ).first()
            existing_operate_points = db.query(OperatePoint).filter(
                and_(
                    OperatePoint.scene_id == request.scene_id,
                    OperatePoint.is_deleted == False
                )
            ).first()

            # Record whether this push overwrites existing data
            has_existing_data = existing_storage_areas or existing_operate_points

            # If the scene already has data, delete it before recreating
            if has_existing_data:
                logger.info(f"Existing scene data found, deleting: scene_id={request.scene_id}")
                MapDataService._delete_existing_data(db, request.scene_id)
            else:
                logger.info(f"No existing scene data found, creating directly: scene_id={request.scene_id}")

            # Handle storage area data - create new records
            logger.info(f"Creating storage areas: count={len(request.storage_areas)}")
            for area_data in request.storage_areas:
                new_area = MapDataService._create_storage_area(area_data, request.scene_id, request.operate_points)
                db.add(new_area)
                storage_areas_count += 1

            # Build mappings from storage area ID to type and name
            storage_area_type_mapping = {area.id: area.area_type for area in request.storage_areas}
            storage_area_name_mapping = {area.id: area.area_name for area in request.storage_areas}

            # Check operate points for duplicate station names
            duplicate_stations = []
            valid_operate_points = []

            # Existing (non-deleted) station and storage location names in this scene
            existing_station_names = set()
            existing_location_names = set()
            if not has_existing_data:  # Only check the database when no existing data was overwritten
                existing_points = db.query(OperatePoint.station_name, OperatePoint.storage_location_name).filter(
                    and_(
                        OperatePoint.scene_id == request.scene_id,
                        OperatePoint.is_deleted == False
                    )
                ).all()
                existing_station_names = {point.station_name for point in existing_points}
                existing_location_names = {point.storage_location_name for point in existing_points}

            # Check the request's operate points for duplicate station and storage
            # location names (including clashes with rows already in the database)
            seen_station_names = set()
            seen_location_names = set()
            for point_data in request.operate_points:
                # Clashes with existing database rows
                if point_data.station_name in existing_station_names:
                    duplicate_stations.append(point_data.station_name)
                    logger.warning(f"Station name clashes with existing database data: {point_data.station_name}")
                    continue
                if point_data.storage_location_name in existing_location_names:
                    duplicate_stations.append(f"{point_data.station_name} (duplicate storage location name)")
                    logger.warning(f"Storage location name clashes with existing database data: {point_data.storage_location_name}")
                    continue
                # Duplicates within the request itself
                if point_data.station_name in seen_station_names:
                    duplicate_stations.append(point_data.station_name)
                    logger.warning(f"Station name duplicated within the request: {point_data.station_name}")
                    continue
                if point_data.storage_location_name in seen_location_names:
                    duplicate_stations.append(f"{point_data.station_name} (duplicate storage location name)")
                    logger.warning(f"Storage location name duplicated within the request: {point_data.storage_location_name}")
                    continue
                seen_station_names.add(point_data.station_name)
                seen_location_names.add(point_data.storage_location_name)
                valid_operate_points.append(point_data)

            # Handle operate point data - create new records
            logger.info(f"Creating operate points: count={len(valid_operate_points)}")
            for point_data in valid_operate_points:
                new_point = MapDataService._create_operate_point(point_data, request.scene_id, storage_area_type_mapping, storage_area_name_mapping)
                db.add(new_point)
                operate_points_count += 1
                # Handle layer data
                if point_data.layers:
                    layer_counts = MapDataService._handle_layers(
                        db, new_point, point_data.layers, is_update=False
                    )
                    layers_count += layer_counts['created']

            # Commit the transaction
            db.commit()
            logger.info(f"Map data push succeeded: scene_id={request.scene_id}, "
                        f"storage_areas={storage_areas_count}, operate_points={operate_points_count}, "
                        f"layers={layers_count}")

            # Compose the result message depending on whether existing data was
            # overwritten and whether duplicate stations were filtered out
            if has_existing_data:
                result_message = (f"Push succeeded, existing data overwritten. Created {storage_areas_count} storage areas, "
                                  f"{operate_points_count} operate points, {layers_count} layers")
            else:
                result_message = (f"Push succeeded. Created {storage_areas_count} storage areas, "
                                  f"{operate_points_count} operate points, {layers_count} layers")
            # Append a notice when duplicate stations were filtered out
            if duplicate_stations:
                duplicate_count = len(duplicate_stations)
                duplicate_names = ", ".join(set(duplicate_stations))  # deduplicated station names
                result_message += f". Detected {duplicate_count} duplicate station names, which were filtered out: {duplicate_names}"

            return MapDataPushResponse(
                scene_id=request.scene_id,
                storage_areas_count=storage_areas_count,
                operate_points_count=operate_points_count,
                layers_count=layers_count,
                message=result_message
            )
        except Exception as e:
            db.rollback()
            logger.error(f"Map data push failed: {str(e)}")
            raise
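
    # Minimal push usage sketch (assumed names: a SessionLocal session factory
    # and a pre-built MapDataPushRequest called push_request; adjust to the
    # actual project wiring):
    #
    #     with SessionLocal() as session:
    #         response = MapDataService.push_map_data(session, push_request)
    #         print(response.message)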

    @staticmethod
    def query_map_data(db: Session, request: MapDataQueryRequest) -> MapDataQueryResponse:
        """
        Query map data for a scene.

        Args:
            db: database session
            request: map data query request

        Returns:
            MapDataQueryResponse: query result
        """
        try:
            # Query storage area data
            storage_areas_query = db.query(StorageArea).filter(
                and_(
                    StorageArea.scene_id == request.scene_id,
                    StorageArea.is_deleted == False
                )
            )
            if request.area_type:
                storage_areas_query = storage_areas_query.filter(
                    StorageArea.area_type == request.area_type
                )
            storage_areas = storage_areas_query.all()

            # Query operate point data
            operate_points_query = db.query(OperatePoint).filter(
                and_(
                    OperatePoint.scene_id == request.scene_id,
                    OperatePoint.is_deleted == False
                )
            )
            operate_points = operate_points_query.all()

            # Aggregate statistics
            total_capacity = sum(area.max_capacity for area in storage_areas)
            used_capacity = sum(area.current_usage for area in storage_areas)
            dense_areas_count = sum(1 for area in storage_areas if area.area_type == StorageAreaType.DENSE)
            general_areas_count = len(storage_areas) - dense_areas_count

            # Query layer data
            total_layers = 0
            occupied_layers = 0
            if request.include_layers:
                for point in operate_points:
                    layers = db.query(OperatePointLayer).filter(
                        and_(
                            OperatePointLayer.operate_point_id == point.id,
                            OperatePointLayer.is_deleted == False
                        )
                    ).all()
                    total_layers += len(layers)
                    occupied_layers += sum(1 for layer in layers if layer.is_occupied)

            # Convert to the response format
            storage_areas_data = []
            for area in storage_areas:
                area_dict = area.to_dict()
                area_dict['area_type'] = area.area_type.value
                storage_areas_data.append(area_dict)

            operate_points_data = []
            for point in operate_points:
                point_dict = point.to_dict()
                # Attach the storage area type
                if point.storage_area_type:
                    point_dict['storage_area_type'] = point.storage_area_type.value
                # Attach the storage area name
                if point.area_name:
                    point_dict['area_name'] = point.area_name
                if request.include_layers:
                    # Include layer data, ordered by layer index
                    layers = db.query(OperatePointLayer).filter(
                        and_(
                            OperatePointLayer.operate_point_id == point.id,
                            OperatePointLayer.is_deleted == False
                        )
                    ).order_by(OperatePointLayer.layer_index).all()
                    point_dict['layers'] = [layer.to_dict() for layer in layers]
                operate_points_data.append(point_dict)

            return MapDataQueryResponse(
                scene_id=request.scene_id,
                storage_areas=storage_areas_data,
                operate_points=operate_points_data,
                total_capacity=total_capacity,
                used_capacity=used_capacity,
                dense_areas_count=dense_areas_count,
                general_areas_count=general_areas_count,
                total_layers=total_layers,
                occupied_layers=occupied_layers
            )
        except Exception as e:
            logger.error(f"Map data query failed: {str(e)}")
            raise

    @staticmethod
    def _delete_existing_data(db: Session, scene_id: str):
        """Soft-delete all existing data for a scene."""
        # Soft-delete operate point layers. synchronize_session=False is
        # needed here: the default "evaluate" strategy cannot evaluate an
        # IN-subquery criterion against in-session objects.
        db.query(OperatePointLayer).filter(
            OperatePointLayer.operate_point_id.in_(
                db.query(OperatePoint.id).filter(
                    and_(
                        OperatePoint.scene_id == scene_id,
                        OperatePoint.is_deleted == False
                    )
                )
            )
        ).update({OperatePointLayer.is_deleted: True}, synchronize_session=False)
        # Soft-delete operate points
        db.query(OperatePoint).filter(
            and_(
                OperatePoint.scene_id == scene_id,
                OperatePoint.is_deleted == False
            )
        ).update({OperatePoint.is_deleted: True}, synchronize_session=False)
        # Soft-delete storage areas
        db.query(StorageArea).filter(
            and_(
                StorageArea.scene_id == scene_id,
                StorageArea.is_deleted == False
            )
        ).update({StorageArea.is_deleted: True}, synchronize_session=False)
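
    # Rough shape of the first bulk update above (table and column names
    # depend on the model definitions and are shown only as placeholders):
    #
    #     UPDATE operate_point_layer SET is_deleted = true
    #     WHERE operate_point_id IN (
    #         SELECT id FROM operate_point
    #         WHERE scene_id = :scene_id AND is_deleted = false)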

    @staticmethod
    def _calculate_storage_area_capacity(area_type: str, operate_points_data: List) -> int:
        """
        Calculate a storage area's capacity.

        Args:
            area_type: storage area type
            operate_points_data: operate points belonging to this area

        Returns:
            int: the calculated capacity
        """
        # Pull the parameters for this area type from settings
        if area_type == "dense":
            base_capacity = settings.MAP_DENSE_STORAGE_BASE_CAPACITY
            capacity_per_point = settings.MAP_DENSE_STORAGE_CAPACITY_PER_POINT
            layer_multiplier = settings.MAP_DENSE_STORAGE_LAYER_MULTIPLIER
        else:  # general
            base_capacity = settings.MAP_GENERAL_STORAGE_BASE_CAPACITY
            capacity_per_point = settings.MAP_GENERAL_STORAGE_CAPACITY_PER_POINT
            layer_multiplier = settings.MAP_GENERAL_STORAGE_LAYER_MULTIPLIER
        # Start from the base capacity
        total_capacity = base_capacity
        # Add extra capacity per operate point, scaled by its layer count
        for point_data in operate_points_data:
            point_capacity = capacity_per_point
            # Apply the layer multiplier when the point has more than one layer
            if point_data.max_layers > 1:
                point_capacity = int(point_capacity * layer_multiplier * point_data.max_layers)
            total_capacity += point_capacity
        return total_capacity
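
    # Worked example with assumed settings (base_capacity=100,
    # capacity_per_point=10, layer_multiplier=1.5): an area holding one
    # single-layer point and one 3-layer point yields
    # 100 + 10 + int(10 * 1.5 * 3) = 155.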

    @staticmethod
    def _create_storage_area(area_data: StorageAreaData, scene_id: str,
                             operate_points_data: List[OperatePointData]) -> StorageArea:
        """Create a storage area."""
        # Select the operate points belonging to this area
        area_points = [point for point in operate_points_data if point.storage_area_id == area_data.id]
        # Capacity is calculated automatically by the system
        max_capacity = MapDataService._calculate_storage_area_capacity(
            area_data.area_type.value, area_points
        )
        return StorageArea(
            id=area_data.id,
            area_name=area_data.area_name,
            area_code=area_data.area_code,
            area_type=StorageAreaType(area_data.area_type),
            scene_id=scene_id,
            max_capacity=max_capacity,
            description=area_data.description,
            tags=area_data.tags
        )

    @staticmethod
    def _create_operate_point(point_data: OperatePointData, scene_id: str,
                              storage_area_type_mapping: Dict[str, StorageAreaTypeEnum],
                              storage_area_name_mapping: Dict[str, str]) -> OperatePoint:
        """Create an operate point."""
        # Resolve the storage area type and name from the storage area ID
        storage_area_type = None
        area_name = None
        if point_data.storage_area_id and point_data.storage_area_id in storage_area_type_mapping:
            storage_area_type = StorageAreaType(storage_area_type_mapping[point_data.storage_area_id])
            area_name = storage_area_name_mapping.get(point_data.storage_area_id)
        # Generate a UUID as the operate point ID
        operate_point_id = str(uuid.uuid4())
        return OperatePoint(
            id=operate_point_id,
            station_name=point_data.station_name,
            storage_location_name=point_data.storage_location_name,
            scene_id=scene_id,
            storage_area_id=point_data.storage_area_id,
            storage_area_type=storage_area_type,
            area_name=area_name,
            max_layers=point_data.max_layers,
            position_x=point_data.position_x,
            position_y=point_data.position_y,
            position_z=point_data.position_z,
            content=point_data.content or "",
            tags=point_data.tags or "",
            description=point_data.description
        )
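
    # Note: because the ID is a fresh uuid4 on every push, operate point IDs
    # do not survive a re-push of the same scene; the duplicate checks above
    # suggest stable cross-push lookups rely on scene_id plus station_name.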

    @staticmethod
    def _handle_layers(db: Session, operate_point: OperatePoint,
                       layers_data: Optional[List[OperatePointLayerData]],
                       is_update: bool = False) -> Dict[str, int]:
        """
        Handle layer data.

        Args:
            db: database session
            operate_point: the operate point the layers belong to
            layers_data: list of layer data
            is_update: whether this is an update operation (currently always False)

        Returns:
            Dict[str, int]: counts of created layers
        """
        created_count = 0
        if not layers_data:
            return {'created': created_count, 'updated': 0}
        # Create the new layers
        for layer_data in layers_data:
            layer_id = str(uuid.uuid4())
            new_layer = OperatePointLayer(
                id=layer_id,
                operate_point_id=operate_point.id,
                station_name=operate_point.station_name,
                storage_location_name=operate_point.storage_location_name,
                area_id=operate_point.storage_area_id,  # redundant storage area ID field
                area_name=operate_point.area_name,  # redundant storage area name field
                scene_id=operate_point.scene_id,  # redundant scene ID field
                layer_index=layer_data.layer_index,
                layer_name=layer_data.layer_name,
                max_weight=layer_data.max_weight,
                max_volume=layer_data.max_volume,
                layer_height=layer_data.layer_height,
                description=layer_data.description,
                tags=layer_data.tags
            )
            db.add(new_layer)
            # Sync extended properties onto the newly created layer
            try:
                MapDataService._sync_extended_properties_to_new_layer(db, new_layer)
                logger.debug(f"Synced extended properties to new layer {new_layer.id}")
            except Exception as e:
                logger.error(f"Failed to sync extended properties to new layer {new_layer.id}: {str(e)}")
                # Swallow the error so the main map push flow is not interrupted
            created_count += 1
        return {'created': created_count, 'updated': 0}
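
    # Example (assumed input): a point whose point_data.layers holds three
    # entries produces three OperatePointLayer rows and returns
    # {'created': 3, 'updated': 0}; the 'updated' slot is kept only for
    # interface compatibility, since is_update is currently always False.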

    @staticmethod
    def _sync_extended_properties_to_new_layer(db: Session, layer: OperatePointLayer):
        """
        Sync all enabled extended properties onto a newly created layer.

        Args:
            db: database session
            layer: the newly created layer
        """
        try:
            # Import inside the method to avoid a circular import
            from data.models import ExtendedProperty
            import json
            import datetime

            # Fetch all enabled extended properties
            extended_properties = db.query(ExtendedProperty).filter(
                ExtendedProperty.is_deleted == False,
                ExtendedProperty.is_enabled == True
            ).all()
            if not extended_properties:
                # Nothing to sync
                return

            # Parse the existing config_json
            config = {}
            if layer.config_json:
                try:
                    config = json.loads(layer.config_json)
                except Exception as e:
                    logger.error(f"Failed to parse config_json of layer {layer.id}: {str(e)}")
                    config = {}
            # Make sure the extended_fields key exists
            if 'extended_fields' not in config:
                config['extended_fields'] = {}
            # Sync every extended property
            for prop in extended_properties:
                config['extended_fields'][prop.property_name] = {
                    'value': prop.default_value,
                    'type': prop.property_type.value,
                    'is_required': prop.is_required,
                    'updated_at': datetime.datetime.now().isoformat()
                }
            # Write the updated config back
            layer.config_json = json.dumps(config, ensure_ascii=False, indent=2)
            logger.debug(f"Synced {len(extended_properties)} extended properties to layer {layer.id}")
        except Exception as e:
            logger.error(f"Failed to sync extended properties to layer: {str(e)}")
            raise
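
# Shape of the config_json written above, with an assumed extended property
# named "temperature" (illustrative values only):
#
#     {
#       "extended_fields": {
#         "temperature": {
#           "value": "0",
#           "type": "number",
#           "is_required": false,
#           "updated_at": "2025-07-14T10:29:37"
#         }
#       }
#     }


# ---------------------------------------------------------------------------
# Minimal query usage sketch (illustrative, not part of the service). It
# assumes a SessionLocal session factory; adjust the import to the actual
# project wiring.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from data.session import SessionLocal  # assumed location of the session factory

    db = SessionLocal()
    try:
        query_request = MapDataQueryRequest(scene_id="scene-001", include_layers=True)
        result = MapDataService.query_map_data(db, query_request)
        print(f"{result.scene_id}: {len(result.operate_points)} operate points, "
              f"{result.total_layers} layers ({result.occupied_layers} occupied)")
    finally:
        db.close()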