#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Map data push service.

Handles persistence of operate points and storage areas when map data is pushed.
"""

import uuid
import json
import datetime
from typing import List, Dict

from sqlalchemy.orm import Session
from sqlalchemy import and_

from data.models import StorageArea, OperatePoint, OperatePointLayer, StorageAreaType
from routes.model.map_model import (
    MapDataPushRequest, MapDataPushResponse, MapDataQueryRequest, MapDataQueryResponse,
    StorageAreaData, OperatePointData, OperatePointLayerData, StorageAreaTypeEnum
)
from utils.logger import get_logger
from config.settings import settings

logger = get_logger("services.map_data_service")


class MapDataService:
    """Map data push service."""

    @staticmethod
    def push_map_data(db: Session, request: MapDataPushRequest) -> MapDataPushResponse:
        """
        Push map data in incremental-update mode.

        Existing data is never deleted; records are inserted or updated in place:
        - Storage areas: matched on area_name; update if found, otherwise insert.
        - Operate points: matched on station_name; update if found, otherwise insert.
        - Layers: matched on layer_name; update if found, otherwise insert.

        Args:
            db: Database session.
            request: Map data push request.

        Returns:
            MapDataPushResponse: Push result.
        """
        try:
            # Initialize counters
            storage_areas_created = 0
            storage_areas_updated = 0
            operate_points_created = 0
            operate_points_updated = 0
            layers_created = 0
            layers_updated = 0

            logger.info(f"Starting incremental map data push: scene_id={request.scene_id}")

            # Process storage areas - incremental update
            logger.info(f"Processing storage areas: count={len(request.storage_areas)}")
            for area_data in request.storage_areas:
                is_new = MapDataService._upsert_storage_area(db, area_data, request.scene_id, request.operate_points)
                if is_new:
                    storage_areas_created += 1
                else:
                    storage_areas_updated += 1

            # Detect duplicate station names and storage location names within the request
            duplicate_stations = []
            valid_operate_points = []
            seen_station_names = set()
            seen_location_names = set()
            for point_data in request.operate_points:
                if point_data.station_name in seen_station_names:
                    duplicate_stations.append(point_data.station_name)
                    logger.warning(f"Duplicate station name in request: {point_data.station_name}")
                    continue
                if point_data.storage_location_name in seen_location_names:
                    duplicate_stations.append(f"{point_data.station_name} (duplicate storage location name)")
                    logger.warning(f"Duplicate storage location name in request: {point_data.storage_location_name}")
                    continue
                seen_station_names.add(point_data.station_name)
                seen_location_names.add(point_data.storage_location_name)
                valid_operate_points.append(point_data)

            # Process operate points - incremental update
            logger.info(f"Processing operate points: count={len(valid_operate_points)}")
            for point_data in valid_operate_points:
                is_new, operate_point = MapDataService._upsert_operate_point(
                    db, point_data, request.scene_id
                )
                if is_new:
                    operate_points_created += 1
                else:
                    operate_points_updated += 1

                # Process layers - incremental update
                if point_data.layers:
                    layer_counts = MapDataService._upsert_layers(
                        db, operate_point, point_data.layers
                    )
                    layers_created += layer_counts['created']
                    layers_updated += layer_counts['updated']

            # Commit the transaction
            db.commit()

            logger.info(f"Map data push succeeded: scene_id={request.scene_id}, "
                        f"storage areas (created={storage_areas_created}, updated={storage_areas_updated}), "
                        f"operate points (created={operate_points_created}, updated={operate_points_updated}), "
                        f"layers (created={layers_created}, updated={layers_updated})")

            # Build the response message
            result_message = (
                f"Incremental push succeeded. Storage areas: {storage_areas_created} created, {storage_areas_updated} updated; "
                f"operate points: {operate_points_created} created, {operate_points_updated} updated; "
                f"layers: {layers_created} created, {layers_updated} updated"
            )

            # Append a note if duplicate stations were filtered out
            if duplicate_stations:
                duplicate_count = len(duplicate_stations)
                duplicate_names = ", ".join(set(duplicate_stations))  # de-duplicated names
                result_message += f". {duplicate_count} duplicate station name(s) were filtered out: {duplicate_names}"

            return MapDataPushResponse(
                scene_id=request.scene_id,
                storage_areas_count=storage_areas_created + storage_areas_updated,
                operate_points_count=operate_points_created + operate_points_updated,
                layers_count=layers_created + layers_updated,
                message=result_message
            )

        except Exception as e:
            db.rollback()
            logger.error(f"Map data push failed: {str(e)}")
            raise
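    # Usage sketch (illustrative only): push_map_data owns commit/rollback but
    # not the session lifecycle, so the caller closes the session itself.
    # `SessionLocal` below is an assumed session factory, not part of this module:
    #
    #     db = SessionLocal()
    #     try:
    #         response = MapDataService.push_map_data(db, request)
    #         logger.info(response.message)
    #     finally:
    #         db.close()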
    @staticmethod
    def _upsert_storage_area(db: Session, area_data: StorageAreaData, scene_id: str,
                             operate_points_data: List[OperatePointData]) -> bool:
        """
        Insert or update a storage area.

        Args:
            db: Database session.
            area_data: Storage area data.
            scene_id: Scene ID.
            operate_points_data: List of operate point data.

        Returns:
            bool: True if a new area was created, False if an existing one was updated.
        """
        # Look up an existing area by area_name and scene_id
        existing_area = db.query(StorageArea).filter(
            and_(
                StorageArea.area_name == area_data.area_name,
                StorageArea.scene_id == scene_id,
                StorageArea.is_deleted == False
            )
        ).first()

        # Select the operate points that belong to this area
        area_points = [point for point in operate_points_data if point.area_name == area_data.area_name]

        # Capacity is computed automatically by the system
        max_capacity = MapDataService._calculate_storage_area_capacity(
            area_data.area_type.value, area_points
        )

        if existing_area:
            # Update the existing area
            existing_area.area_type = StorageAreaType(area_data.area_type.value)
            existing_area.max_capacity = max_capacity
            existing_area.description = area_data.description
            existing_area.tags = area_data.tags
            existing_area.select_logic = area_data.select_logic
            existing_area.updated_at = datetime.datetime.now()
            logger.info(f"Updated storage area: {area_data.area_name}")
            return False
        else:
            # Create a new area
            new_area = StorageArea(
                id=str(uuid.uuid4()),
                area_name=area_data.area_name,
                area_type=StorageAreaType(area_data.area_type.value),
                scene_id=scene_id,
                max_capacity=max_capacity,
                description=area_data.description,
                tags=area_data.tags,
                select_logic=area_data.select_logic
            )
            db.add(new_area)
            logger.info(f"Created storage area: {area_data.area_name}")
            return True

    @staticmethod
    def _upsert_operate_point(db: Session, point_data: OperatePointData,
                              scene_id: str) -> tuple[bool, OperatePoint]:
        """
        Insert or update an operate point.

        Args:
            db: Database session.
            point_data: Operate point data.
            scene_id: Scene ID.

        Returns:
            tuple[bool, OperatePoint]: (created flag, operate point object)
        """
        # Look up an existing operate point by station_name and scene_id
        existing_point = db.query(OperatePoint).filter(
            and_(
                OperatePoint.station_name == point_data.station_name,
                OperatePoint.scene_id == scene_id,
                OperatePoint.is_deleted == False
            )
        ).first()

        # Resolve the storage area by name, if one was given
        storage_area = None
        if point_data.area_name:
            storage_area = db.query(StorageArea).filter(
                and_(
                    StorageArea.area_name == point_data.area_name,
                    StorageArea.scene_id == scene_id,
                    StorageArea.is_deleted == False
                )
            ).first()

        if existing_point:
            # Update the existing operate point
            existing_point.storage_location_name = point_data.storage_location_name
            existing_point.storage_area_id = storage_area.id if storage_area else None
            existing_point.storage_area_type = storage_area.area_type if storage_area else None
            existing_point.area_name = point_data.area_name
            existing_point.max_layers = point_data.max_layers
            existing_point.position_x = point_data.position_x
            existing_point.position_y = point_data.position_y
            existing_point.position_z = point_data.position_z
            existing_point.content = point_data.content or ""
            existing_point.tags = point_data.tags or ""
            existing_point.description = point_data.description
            existing_point.updated_at = datetime.datetime.now()
            logger.info(f"Updated operate point: {point_data.station_name}")
            return False, existing_point
        else:
            # Create a new operate point
            new_point = OperatePoint(
                id=str(uuid.uuid4()),
                station_name=point_data.station_name,
                storage_location_name=point_data.storage_location_name,
                scene_id=scene_id,
                storage_area_id=storage_area.id if storage_area else None,
                storage_area_type=storage_area.area_type if storage_area else None,
                area_name=point_data.area_name,
                max_layers=point_data.max_layers,
                position_x=point_data.position_x,
                position_y=point_data.position_y,
                position_z=point_data.position_z,
                content=point_data.content or "",
                tags=point_data.tags or "",
                description=point_data.description
            )
            db.add(new_point)
            logger.info(f"Created operate point: {point_data.station_name}")
            return True, new_point
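    # Note (sketch): storage_area_id and storage_area_type are denormalized
    # copies from the matching StorageArea row, so point queries can avoid a
    # join. When area_name has no match in the scene, both stay None, e.g.:
    #
    #     is_new, point = MapDataService._upsert_operate_point(db, point_data, scene_id)
    #     # point.storage_area_id is None if point_data.area_name is unknown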
    @staticmethod
    def _upsert_layers(db: Session, operate_point: OperatePoint,
                       layers_data: List[OperatePointLayerData]) -> Dict[str, int]:
        """
        Insert or update operate point layers.

        Args:
            db: Database session.
            operate_point: Operate point object.
            layers_data: List of layer data.

        Returns:
            Dict[str, int]: Counts of created and updated layers.
        """
        created_count = 0
        updated_count = 0

        if not layers_data:
            return {'created': created_count, 'updated': updated_count}

        # Layer index is auto-generated from list order (starting at 1)
        for layer_index, layer_data in enumerate(layers_data, 1):
            # Look up an existing layer by layer_name and operate_point_id
            existing_layer = db.query(OperatePointLayer).filter(
                and_(
                    OperatePointLayer.layer_name == layer_data.layer_name,
                    OperatePointLayer.operate_point_id == operate_point.id,
                    OperatePointLayer.is_deleted == False
                )
            ).first()

            if existing_layer:
                # Update the existing layer
                existing_layer.layer_index = layer_index
                existing_layer.max_weight = layer_data.max_weight
                existing_layer.max_volume = layer_data.max_volume
                existing_layer.layer_height = layer_data.layer_height
                existing_layer.description = layer_data.description
                existing_layer.tags = layer_data.tags
                existing_layer.updated_at = datetime.datetime.now()
                logger.debug(f"Updated layer: {layer_data.layer_name} (layer_index={layer_index})")
                updated_count += 1
            else:
                # Create a new layer
                new_layer = OperatePointLayer(
                    id=str(uuid.uuid4()),
                    operate_point_id=operate_point.id,
                    station_name=operate_point.station_name,
                    storage_location_name=operate_point.storage_location_name,
                    area_name=operate_point.area_name,
                    scene_id=operate_point.scene_id,
                    layer_index=layer_index,
                    layer_name=layer_data.layer_name,
                    max_weight=layer_data.max_weight,
                    max_volume=layer_data.max_volume,
                    layer_height=layer_data.layer_height,
                    description=layer_data.description,
                    tags=layer_data.tags
                )
                db.add(new_layer)

                # Sync extended properties onto the newly created layer
                try:
                    MapDataService._sync_extended_properties_to_new_layer(db, new_layer)
                    logger.debug(f"Synced extended properties to new layer {new_layer.id}")
                except Exception as e:
                    logger.error(f"Failed to sync extended properties to new layer {new_layer.id}: {str(e)}")

                logger.debug(f"Created layer: {layer_data.layer_name} (layer_index={layer_index})")
                created_count += 1

        return {'created': created_count, 'updated': updated_count}
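    # Example (illustrative layer names): layer_index follows list order, while
    # identity is keyed on (layer_name, operate_point_id). Pushing
    # ["bottom", "top"] creates layer_index 1 and 2; later re-pushing
    # ["top", "bottom"] updates those same two rows with swapped indices
    # instead of inserting duplicates.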
    @staticmethod
    def query_map_data(db: Session, request: MapDataQueryRequest) -> MapDataQueryResponse:
        """
        Query map data.

        Args:
            db: Database session.
            request: Map data query request.

        Returns:
            MapDataQueryResponse: Query result.
        """
        try:
            # Query storage areas
            storage_areas_query = db.query(StorageArea).filter(
                and_(
                    StorageArea.scene_id == request.scene_id,
                    StorageArea.is_deleted == False
                )
            )
            if request.area_type:
                storage_areas_query = storage_areas_query.filter(
                    StorageArea.area_type == request.area_type
                )
            storage_areas = storage_areas_query.all()

            # Query operate points
            operate_points_query = db.query(OperatePoint).filter(
                and_(
                    OperatePoint.scene_id == request.scene_id,
                    OperatePoint.is_deleted == False
                )
            )
            operate_points = operate_points_query.all()

            # Aggregate statistics
            total_capacity = sum(area.max_capacity for area in storage_areas)
            used_capacity = sum(area.current_usage for area in storage_areas)
            dense_areas_count = sum(1 for area in storage_areas if area.area_type == StorageAreaType.DENSE)
            general_areas_count = len(storage_areas) - dense_areas_count

            # Count layers
            total_layers = 0
            occupied_layers = 0
            if request.include_layers:
                for point in operate_points:
                    layers = db.query(OperatePointLayer).filter(
                        and_(
                            OperatePointLayer.operate_point_id == point.id,
                            OperatePointLayer.is_deleted == False
                        )
                    ).all()
                    total_layers += len(layers)
                    occupied_layers += sum(1 for layer in layers if layer.is_occupied)

            # Convert to response format
            storage_areas_data = []
            for area in storage_areas:
                area_dict = area.to_dict()
                area_dict['area_type'] = area.area_type.value
                storage_areas_data.append(area_dict)

            operate_points_data = []
            for point in operate_points:
                point_dict = point.to_dict()
                # Attach the storage area type
                if point.storage_area_type:
                    point_dict['storage_area_type'] = point.storage_area_type.value
                # Attach the storage area name
                if point.area_name:
                    point_dict['area_name'] = point.area_name
                if request.include_layers:
                    # Include layer data
                    layers = db.query(OperatePointLayer).filter(
                        and_(
                            OperatePointLayer.operate_point_id == point.id,
                            OperatePointLayer.is_deleted == False
                        )
                    ).order_by(OperatePointLayer.layer_index).all()
                    point_dict['layers'] = [layer.to_dict() for layer in layers]
                operate_points_data.append(point_dict)

            return MapDataQueryResponse(
                scene_id=request.scene_id,
                storage_areas=storage_areas_data,
                operate_points=operate_points_data,
                total_capacity=total_capacity,
                used_capacity=used_capacity,
                dense_areas_count=dense_areas_count,
                general_areas_count=general_areas_count,
                total_layers=total_layers,
                occupied_layers=occupied_layers
            )

        except Exception as e:
            logger.error(f"Map data query failed: {str(e)}")
            raise

    @staticmethod
    def _delete_existing_data(db: Session, scene_id: str):
        """Physically delete all existing map data for a scene (to avoid primary key conflicts)."""
        # Collect the IDs of the operate points to delete
        operate_point_ids = db.query(OperatePoint.id).filter(
            and_(
                OperatePoint.scene_id == scene_id,
                OperatePoint.is_deleted == False
            )
        ).all()
        operate_point_ids = [point_id[0] for point_id in operate_point_ids]

        # Physically delete operate point layers
        if operate_point_ids:
            db.query(OperatePointLayer).filter(
                OperatePointLayer.operate_point_id.in_(operate_point_ids)
            ).delete(synchronize_session=False)

        # Physically delete operate points
        db.query(OperatePoint).filter(
            and_(
                OperatePoint.scene_id == scene_id,
                OperatePoint.is_deleted == False
            )
        ).delete(synchronize_session=False)

        # Physically delete storage areas
        db.query(StorageArea).filter(
            and_(
                StorageArea.scene_id == scene_id,
                StorageArea.is_deleted == False
            )
        ).delete(synchronize_session=False)

    @staticmethod
    def _calculate_storage_area_capacity(area_type: str, operate_points_data: List) -> int:
        """
        Calculate storage area capacity.

        Args:
            area_type: Storage area type.
            operate_points_data: Operate points belonging to this area.

        Returns:
            int: Calculated capacity.
        """
        # Read per-type parameters from configuration
        if area_type == "dense":
            base_capacity = settings.MAP_DENSE_STORAGE_BASE_CAPACITY
            capacity_per_point = settings.MAP_DENSE_STORAGE_CAPACITY_PER_POINT
            layer_multiplier = settings.MAP_DENSE_STORAGE_LAYER_MULTIPLIER
        else:  # general
            base_capacity = settings.MAP_GENERAL_STORAGE_BASE_CAPACITY
            capacity_per_point = settings.MAP_GENERAL_STORAGE_CAPACITY_PER_POINT
            layer_multiplier = settings.MAP_GENERAL_STORAGE_LAYER_MULTIPLIER

        # Start from the base capacity
        total_capacity = base_capacity

        # Add per-point capacity, scaled by layer count for multi-layer points
        for point_data in operate_points_data:
            point_capacity = capacity_per_point
            if point_data.max_layers > 1:
                point_capacity = int(point_capacity * layer_multiplier * point_data.max_layers)
            total_capacity += point_capacity

        return total_capacity
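    # Worked example (settings values are hypothetical): with
    # MAP_DENSE_STORAGE_BASE_CAPACITY = 100, MAP_DENSE_STORAGE_CAPACITY_PER_POINT = 10
    # and MAP_DENSE_STORAGE_LAYER_MULTIPLIER = 1.5, a dense area holding one
    # single-layer point and one 4-layer point yields:
    #
    #     100 + 10 + int(10 * 1.5 * 4) = 100 + 10 + 60 = 170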
    @staticmethod
    def _sync_extended_properties_to_new_layer(db: Session, layer: OperatePointLayer):
        """
        Sync all enabled extended properties onto a newly created layer.

        Args:
            db: Database session.
            layer: Newly created layer object.
        """
        try:
            # Imported here to avoid a circular import
            from data.models import ExtendedProperty

            # Fetch all enabled extended properties
            extended_properties = db.query(ExtendedProperty).filter(
                ExtendedProperty.is_deleted == False,
                ExtendedProperty.is_enabled == True
            ).all()

            if not extended_properties:
                # Nothing to sync
                return

            # Parse the existing config_json
            config = {}
            if layer.config_json:
                try:
                    config = json.loads(layer.config_json)
                except Exception as e:
                    logger.error(f"Failed to parse config_json of layer {layer.id}: {str(e)}")
                    config = {}

            # Ensure the extended_fields key exists
            if 'extended_fields' not in config:
                config['extended_fields'] = {}

            # Sync every extended property
            for prop in extended_properties:
                config['extended_fields'][prop.property_name] = {
                    'value': prop.default_value,
                    'type': prop.property_type.value,
                    'is_required': prop.is_required,
                    'updated_at': datetime.datetime.now().isoformat()
                }

            # Write back config_json
            layer.config_json = json.dumps(config, ensure_ascii=False, indent=2)
            logger.debug(f"Synced {len(extended_properties)} extended properties to layer {layer.id}")

        except Exception as e:
            logger.error(f"Failed to sync extended properties to layer: {str(e)}")
            raise
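    # Resulting config_json shape (property name and values are illustrative):
    #
    #     {
    #       "extended_fields": {
    #         "temperature_zone": {
    #           "value": "ambient",
    #           "type": "string",
    #           "is_required": false,
    #           "updated_at": "2024-01-01T00:00:00"
    #         }
    #       }
    #     }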