
Spring 数据库与缓存优化


数据库性能优化

1. 连接池和事务优化

JPA/Hibernate 优化配置

@Configuration
@EnableJpaRepositories(
    basePackages = "com.example.repository",
    enableDefaultTransactions = false // transactions are demarcated explicitly, not per-repository
)
@EnableTransactionManagement
public class JpaConfig {

    /** Binds {@code spring.jpa.*} properties so they can be merged into the factory below. */
    @Bean
    @ConfigurationProperties("spring.jpa")
    public JpaProperties jpaProperties() {
        return new JpaProperties();
    }

    /**
     * Builds the JPA EntityManagerFactory with Hibernate tuned for JDBC batching,
     * statement ordering and a JCache-backed second-level cache.
     */
    @Bean
    public LocalContainerEntityManagerFactoryBean entityManagerFactory(
            DataSource dataSource, JpaProperties jpaProperties) {
        LocalContainerEntityManagerFactoryBean factory = new LocalContainerEntityManagerFactoryBean();
        factory.setDataSource(dataSource);
        factory.setPackagesToScan("com.example.entity");
        factory.setJpaVendorAdapter(new HibernateJpaVendorAdapter());

        // Start from the externally configured properties, then layer tuning on top.
        Map<String, Object> hibernateProps = new HashMap<>(jpaProperties.getProperties());

        // JDBC batching: group up to 50 statements and order them so they batch.
        hibernateProps.put("hibernate.jdbc.batch_size", 50);
        hibernateProps.put("hibernate.order_inserts", true);
        hibernateProps.put("hibernate.order_updates", true);
        hibernateProps.put("hibernate.jdbc.fetch_size", 100);

        // SQL logging stays off; statistics stay on for monitoring.
        hibernateProps.put("hibernate.show_sql", false);
        hibernateProps.put("hibernate.format_sql", false);
        hibernateProps.put("hibernate.use_sql_comments", false);
        hibernateProps.put("hibernate.generate_statistics", true);

        // Second-level and query cache backed by JCache.
        hibernateProps.put("hibernate.cache.use_second_level_cache", true);
        hibernateProps.put("hibernate.cache.use_query_cache", true);
        hibernateProps.put("hibernate.cache.region.factory_class", "org.hibernate.cache.jcache.JCacheRegionFactory");

        factory.setJpaPropertyMap(hibernateProps);
        return factory;
    }

    /** Standard JPA transaction manager bound to the factory above. */
    @Bean
    public PlatformTransactionManager transactionManager(EntityManagerFactory emf) {
        JpaTransactionManager txManager = new JpaTransactionManager();
        txManager.setEntityManagerFactory(emf);
        return txManager;
    }
}

// 事务优化服务
// Transaction-optimized service
@Service
@Transactional(readOnly = true) // read-only by default; write paths opt in explicitly
public class OptimizedTransactionService {

    // Flush/clear interval for batch inserts; matches hibernate.jdbc.batch_size.
    private static final int BATCH_SIZE = 50;

    @Autowired
    private UserRepository userRepository;

    // FIX: auditRepository was referenced in auditUserCreation but never declared/injected.
    @Autowired
    private AuditRepository auditRepository;

    // Needed to clear the persistence context during batch inserts.
    @PersistenceContext
    private EntityManager entityManager;

    /** Read-only listing; runs under the class-level read-only transaction. */
    public List<User> findAllUsers() {
        return userRepository.findAll();
    }

    /** Write path: overrides the read-only default with a read-write transaction. */
    @Transactional
    public User createUser(User user) {
        return userRepository.save(user);
    }

    /**
     * Batch insert.
     * FIX: the original flushed when {@code i % 50 == 0}, i.e. on the very first
     * row, and never cleared the persistence context — so first-level-cache memory
     * still grew with the batch size. Flush AND clear every BATCH_SIZE rows.
     */
    @Transactional
    public void batchCreateUsers(List<User> users) {
        for (int i = 0; i < users.size(); i++) {
            userRepository.save(users.get(i));

            if ((i + 1) % BATCH_SIZE == 0) {
                userRepository.flush();
                entityManager.clear(); // detach flushed entities so they can be GC'd
            }
        }
    }

    /**
     * Audit log in an independent transaction (REQUIRES_NEW) so the audit row
     * commits even if the caller's transaction rolls back.
     */
    @Transactional(propagation = Propagation.REQUIRES_NEW)
    public void auditUserCreation(User user) {
        auditRepository.save(new AuditLog("USER_CREATED", user.getId()));
    }
}

SQL 性能优化

@Repository
public class OptimizedUserRepository {

    @PersistenceContext
    private EntityManager entityManager;

    // FIX: findUsersWithOptimizedPaging used userRepository without declaring it.
    @Autowired
    private UserRepository userRepository;

    /**
     * Active-user aggregate via native SQL.
     * FIX: the original declared body-less methods annotated with @Query inside a
     * plain class — a class method without a body does not compile, and @Query is
     * only honored on Spring Data repository *interfaces*. The queries are now
     * executed through the EntityManager with the same signatures.
     */
    @SuppressWarnings("unchecked")
    public List<Object[]> findActiveUsersWithOrderCount(
            LocalDate startDate, int minOrderCount, int limit) {
        String sql = """
            SELECT u.id, u.name, COUNT(o.id) as order_count
            FROM users u
            LEFT JOIN orders o ON u.id = o.user_id
            WHERE u.created_date >= :startDate
            GROUP BY u.id, u.name
            HAVING COUNT(o.id) > :minOrderCount
            ORDER BY order_count DESC
            """;
        return entityManager.createNativeQuery(sql)
            .setParameter("startDate", startDate)
            .setParameter("minOrderCount", minOrderCount)
            .setMaxResults(limit) // portable replacement for a LIMIT :limit parameter
            .getResultList();
    }

    /**
     * Lookup by email.
     * NOTE(review): the original embedded an optimizer index hint in JPQL — JPQL
     * ignores hints; a native query would be required for idx_user_email to be
     * actually forced.
     */
    public Optional<User> findByEmailWithIndexHint(String email) {
        List<User> matches = entityManager
            .createQuery("SELECT u FROM User u WHERE u.email = :email", User.class)
            .setParameter("email", email)
            .setMaxResults(1)
            .getResultList();
        return matches.isEmpty() ? Optional.empty() : Optional.of(matches.get(0));
    }

    /**
     * Two-step paging: page over ids first, then fetch rows by id, so wide
     * columns are not dragged through the paging query.
     */
    public Page<User> findUsersWithOptimizedPaging(Pageable pageable) {
        Page<Long> userIds = userRepository.findUserIds(pageable);
        List<User> users = userRepository.findAllById(userIds.getContent());
        // NOTE(review): findAllById does not guarantee the page's sort order —
        // re-sort here if callers rely on it.
        return new PageImpl<>(users, pageable, userIds.getTotalElements());
    }

    /**
     * Streams the whole table instead of materializing it in memory.
     * NOTE(review): a Spring Data stream must be consumed inside an open
     * (read-only) transaction — confirm the caller provides one.
     */
    public void processLargeDataset() {
        Stream<User> userStream = userRepository.streamAllBy();

        try (userStream) {
            userStream.forEach(this::processUser);
        }
    }

    // Per-user processing hook.
    private void processUser(User user) {
        // user processing logic
    }
}

2. 分库分表策略

ShardingSphere 集成

# application.yml — ShardingSphere sharding configuration.
# t_order is split across 2 databases (routed by user_id) x 4 tables per
# database (routed by order_id); primary keys come from a snowflake generator.
spring:
  shardingsphere:
    datasource:
      # Two physical data sources, one per shard database.
      names: ds0,ds1
      ds0:
        type: com.zaxxer.hikari.HikariDataSource
        driver-class-name: com.mysql.cj.jdbc.Driver
        jdbc-url: jdbc:mysql://db0:3306/order_db
        username: root
        password: ${DB_PASSWORD}
      ds1:
        type: com.zaxxer.hikari.HikariDataSource
        driver-class-name: com.mysql.cj.jdbc.Driver
        jdbc-url: jdbc:mysql://db1:3306/order_db
        username: root
        password: ${DB_PASSWORD}
    rules:
      sharding:
        tables:
          t_order:
            # Expands to ds0.t_order_0..3 and ds1.t_order_0..3.
            actual-data-nodes: ds$->{0..1}.t_order_$->{0..3}
            database-strategy:
              standard:
                sharding-column: user_id
                sharding-algorithm-name: db-mod
            table-strategy:
              standard:
                sharding-column: order_id
                sharding-algorithm-name: table-mod
            key-generate-strategy:
              column: order_id
              key-generator-name: snowflake
        sharding-algorithms:
          # user_id % 2 selects the database.
          db-mod:
            type: MOD
            props:
              sharding-count: 2
          # order_id % 4 selects the table.
          table-mod:
            type: MOD
            props:
              sharding-count: 4
        key-generators:
          snowflake:
            type: SNOWFLAKE
            props:
              # NOTE(review): worker-id must be unique per application instance,
              # otherwise snowflake ids can collide — confirm per-deployment value.
              worker-id: 1
    props:
      # Logs the routed SQL; useful for debugging, disable in production.
      sql-show: true
// 自定义分片算法
// Custom sharding algorithm
public class OrderShardingAlgorithm implements StandardShardingAlgorithm<Long> {

    /**
     * Routes a precise (equality) sharding value to a single target.
     * FIX: Java's {@code %} yields a negative remainder for negative values, so
     * the original suffix lookup could never match and always threw; use
     * Math.floorMod for a non-negative suffix.
     * NOTE(review): suffix matching via endsWith becomes ambiguous once the
     * target count reaches 10 ("t_order_11".endsWith("1")); acceptable for the
     * 2/4 shards configured here — confirm before raising the shard count.
     */
    @Override
    public String doSharding(Collection<String> availableTargetNames,
                             PreciseShardingValue<Long> shardingValue) {
        long value = shardingValue.getValue();
        String suffix = String.valueOf(Math.floorMod(value, availableTargetNames.size()));

        for (String targetName : availableTargetNames) {
            if (targetName.endsWith(suffix)) {
                return targetName;
            }
        }
        throw new IllegalArgumentException("无法找到分片目标: " + shardingValue);
    }

    /** Range queries cannot be pruned by a MOD-style key, so fan out to every shard. */
    @Override
    public Collection<String> doSharding(Collection<String> availableTargetNames,
                                         RangeShardingValue<Long> shardingValue) {
        return availableTargetNames;
    }
}

// 分库分表下的分页查询优化
@Repository
public class ShardingOrderRepository {

    @Autowired
    private JdbcTemplate jdbcTemplate;

    /**
     * Keyset ("cursor") pagination for sharded tables: avoids the cost of
     * LIMIT offset,size at deep offsets, and gives ShardingSphere a sortable
     * key on which to merge per-shard result sets.
     *
     * @param lastOrderId id of the last row of the previous page; {@code null}
     *                    means the first page (FIX: the original unboxed it
     *                    straight into the query and NPE'd on the first page)
     * @param pageSize    maximum number of rows to return; must be positive
     * @return up to {@code pageSize} orders with order_id greater than the cursor
     */
    public List<Order> findOrdersByPage(Long lastOrderId, int pageSize) {
        if (pageSize <= 0) {
            throw new IllegalArgumentException("pageSize must be positive: " + pageSize);
        }
        long cursor = (lastOrderId == null) ? 0L : lastOrderId;
        String sql = "SELECT * FROM t_order WHERE order_id > ? ORDER BY order_id ASC LIMIT ?";
        return jdbcTemplate.query(sql, new OrderRowMapper(), cursor, pageSize);
    }
}

缓存优化

1. 多级缓存策略

Spring Cache 多级缓存实现

@Configuration
@EnableCaching
public class MultiLevelCacheConfig {

    /** L1: in-process Caffeine cache — 10 min TTL, capped at 1000 entries, stats enabled. */
    @Bean
    public CacheManager localCacheManager() {
        Caffeine<Object, Object> spec = Caffeine.newBuilder()
            .expireAfterWrite(10, TimeUnit.MINUTES)
            .maximumSize(1000)
            .recordStats();
        CaffeineCacheManager manager = new CaffeineCacheManager();
        manager.setCaffeine(spec);
        return manager;
    }

    /** L2: shared Redis cache — 1 h TTL, string keys, JSON values, nulls not cached. */
    @Bean
    public RedisCacheManager redisCacheManager(RedisConnectionFactory redisConnectionFactory) {
        RedisSerializationContext.SerializationPair<String> keyPair =
            RedisSerializationContext.SerializationPair.fromSerializer(new StringRedisSerializer());
        RedisSerializationContext.SerializationPair<Object> valuePair =
            RedisSerializationContext.SerializationPair.fromSerializer(new GenericJackson2JsonRedisSerializer());

        RedisCacheConfiguration defaults = RedisCacheConfiguration.defaultCacheConfig()
            .entryTtl(Duration.ofHours(1))
            .disableCachingNullValues()
            .serializeKeysWith(keyPair)
            .serializeValuesWith(valuePair);

        return RedisCacheManager.builder(redisConnectionFactory)
            .cacheDefaults(defaults)
            .transactionAware()
            .build();
    }

    /** The primary CacheManager layers the local L1 cache over the Redis L2 cache. */
    @Primary
    @Bean
    public CacheManager multiLevelCacheManager(
            CacheManager localCacheManager,
            CacheManager redisCacheManager) {
        return new MultiLevelCacheManager(localCacheManager, redisCacheManager);
    }
}

// 自定义多级缓存管理器
// Custom two-level cache manager.
public class MultiLevelCacheManager implements CacheManager {

    private final CacheManager localCacheManager;
    private final CacheManager redisCacheManager;
    // FIX: the original allocated a brand-new MultiLevelCache on every getCache()
    // call and kept a separate, non-thread-safe HashSet of names that never
    // learned about caches created after construction. A ConcurrentHashMap both
    // memoizes the composite wrappers and serves as the name registry.
    private final Map<String, Cache> caches = new ConcurrentHashMap<>();

    public MultiLevelCacheManager(CacheManager localCacheManager, CacheManager redisCacheManager) {
        this.localCacheManager = localCacheManager;
        this.redisCacheManager = redisCacheManager;

        // Pre-register every cache either delegate already knows about.
        for (String name : localCacheManager.getCacheNames()) {
            caches.computeIfAbsent(name, this::newComposite);
        }
        for (String name : redisCacheManager.getCacheNames()) {
            caches.computeIfAbsent(name, this::newComposite);
        }
    }

    /** Builds the composite cache for a name from the two delegate managers. */
    private Cache newComposite(String name) {
        // NOTE(review): a delegate manager may return null for an unknown name;
        // confirm MultiLevelCache tolerates a missing level before relying on it.
        return new MultiLevelCache(name,
            localCacheManager.getCache(name),
            redisCacheManager.getCache(name));
    }

    /** Returns (and memoizes) the composite cache for {@code name}. */
    @Override
    public Cache getCache(String name) {
        return caches.computeIfAbsent(name, this::newComposite);
    }

    /** Names of all caches seen so far, including those created via getCache. */
    @Override
    public Collection<String> getCacheNames() {
        return Collections.unmodifiableSet(caches.keySet());
    }
}

// 多级缓存实现
// Two-level cache: a fast in-process cache in front of a shared Redis cache.
public class MultiLevelCache implements Cache {

    private final String name;
    private final Cache localCache;
    private final Cache redisCache;

    public MultiLevelCache(String name, Cache localCache, Cache redisCache) {
        this.name = name;
        this.localCache = localCache;
        this.redisCache = redisCache;
    }

    @Override
    public String getName() {
        return name;
    }

    @Override
    public Object getNativeCache() {
        return this;
    }

    /** L1 first; on L1 miss read L2 and back-fill L1. */
    @Override
    public ValueWrapper get(Object key) {
        ValueWrapper value = localCache.get(key);
        if (value != null) {
            return value;
        }

        value = redisCache.get(key);
        if (value != null && value.get() != null) {
            // FIX: only back-fill non-null payloads — a wrapper around a cached
            // null would otherwise be unwrapped to null and passed to put().
            localCache.put(key, value.get());
        }

        return value;
    }

    /** Typed variant of {@link #get(Object)} with the same back-fill behavior. */
    @Override
    public <T> T get(Object key, Class<T> type) {
        T value = localCache.get(key, type);
        if (value != null) {
            return value;
        }

        value = redisCache.get(key, type);
        if (value != null) {
            localCache.put(key, value);
        }

        return value;
    }

    /**
     * FIX: Cache#get(Object, Callable) is abstract on Spring's Cache interface
     * (since 4.3); without it this class does not compile. Reads through both
     * levels, then falls back to the valueLoader on a full miss and caches the
     * loaded value.
     */
    @Override
    @SuppressWarnings("unchecked")
    public <T> T get(Object key, Callable<T> valueLoader) {
        ValueWrapper cached = get(key);
        if (cached != null) {
            return (T) cached.get();
        }
        try {
            T loaded = valueLoader.call();
            if (loaded != null) {
                put(key, loaded);
            }
            return loaded;
        } catch (Exception e) {
            // Contractual wrapper for loader failures (preserves the cause).
            throw new ValueRetrievalException(key, valueLoader, e);
        }
    }

    /** Write-through to both levels. */
    @Override
    public void put(Object key, Object value) {
        localCache.put(key, value);
        redisCache.put(key, value);
    }

    /**
     * Invalidates both levels.
     * NOTE(review): other JVMs' local caches are not notified — stale L1 entries
     * can survive elsewhere until their TTL; confirm whether a pub/sub
     * invalidation channel is needed.
     */
    @Override
    public void evict(Object key) {
        localCache.evict(key);
        redisCache.evict(key);
    }

    @Override
    public void clear() {
        localCache.clear();
        redisCache.clear();
    }
}

// 缓存服务使用示例
// Cache usage examples
@Service
public class CacheService {

    // FIX: these collaborators were referenced below but never declared/injected.
    @Autowired
    private UserRepository userRepository;

    @Autowired
    private CacheManager cacheManager;

    /** Read-through: caches the result under "users::id". */
    @Cacheable(value = "users", key = "#id")
    public User getUserById(Long id) {
        return userRepository.findById(id).orElse(null);
    }

    /** Write-through: always executes and refreshes the cached entry. */
    @CachePut(value = "users", key = "#user.id")
    public User updateUser(User user) {
        return userRepository.save(user);
    }

    /** Removes the entry alongside the delete. */
    @CacheEvict(value = "users", key = "#id")
    public void deleteUser(Long id) {
        userRepository.deleteById(id);
    }

    /**
     * Cache warm-up at startup.
     * NOTE(review): @PostConstruct runs before the context is fully started; an
     * ApplicationReadyEvent listener is usually a safer warm-up trigger.
     */
    @PostConstruct
    public void warmUpCache() {
        Cache cache = cacheManager.getCache("users");
        if (cache == null) { // FIX: getCache may return null for an unknown cache name
            return;
        }
        List<User> activeUsers = userRepository.findActiveUsers();
        for (User user : activeUsers) {
            cache.put(user.getId(), user);
        }
    }
}

2. 缓存问题解决方案

缓存穿透解决方案

@Service
public class CachePenetrationSolution {

    // Bloom filter to cheaply reject ids that definitely do not exist.
    @Autowired
    private BloomFilter<String> bloomFilter;

    // FIX: these dependencies were used below but never declared/injected.
    @Autowired
    private CacheService cacheService;

    @Autowired
    private UserRepository userRepository;

    @Autowired
    private RedisTemplate<String, Object> redisTemplate;

    /**
     * Read path guarded against cache penetration: bloom filter first, then the
     * cache, then the database; database misses are negatively cached.
     * NOTE(review): ids created after the filter was populated are rejected here
     * until the filter is rebuilt — confirm the refresh strategy.
     */
    public User getUserWithBloomFilter(Long id) {
        String cacheKey = "user:" + id;

        if (!bloomFilter.mightContain(cacheKey)) {
            return null; // definitely absent — bloom filters have no false negatives
        }

        User user = cacheService.getUserById(id);
        if (user == null) {
            user = userRepository.findById(id).orElse(null);

            if (user != null) {
                cacheService.cacheUser(user);
            } else {
                // FIX: the original called cacheService.cacheNullValue(id), but the
                // method is defined on THIS class, not on CacheService.
                cacheNullValue(id);
            }
        }

        return user;
    }

    /** Short-lived (5 min) null marker so repeated misses don't hammer the DB. */
    public void cacheNullValue(Long id) {
        String cacheKey = "user:" + id;
        redisTemplate.opsForValue().set(cacheKey, "NULL", 5, TimeUnit.MINUTES);
    }
}

// 布隆过滤器配置
// Bloom filter wiring
@Configuration
public class BloomFilterConfig {

    // FIX: userRepository was used in the listener but never declared/injected.
    @Autowired
    private UserRepository userRepository;

    // FIX: the listener called userBloomFilter() directly, which only returns the
    // singleton under CGLIB @Configuration proxying and would otherwise build a
    // second, empty filter; inject the bean explicitly instead.
    @Autowired
    private BloomFilter<String> bloomFilter;

    /** Sized for ~1M expected keys at a 1% false-positive rate. */
    @Bean
    public BloomFilter<String> userBloomFilter() {
        return BloomFilter.create(
            Funnels.stringFunnel(Charset.defaultCharset()),
            1000000, // expected insertions
            0.01     // acceptable false-positive probability
        );
    }

    /** Populates the filter with all existing user ids once the app is up. */
    @EventListener
    public void onApplicationReady(ApplicationReadyEvent event) {
        List<Long> allUserIds = userRepository.findAllUserIds();
        for (Long userId : allUserIds) {
            bloomFilter.put("user:" + userId);
        }
    }
}

缓存雪崩解决方案

@Service
public class CacheAvalancheSolution {

    // FIX: logger was used below but never declared.
    private static final Logger logger = LoggerFactory.getLogger(CacheAvalancheSolution.class);

    @Autowired
    private RedisTemplate<String, Object> redisTemplate;

    @Autowired
    private CacheManager localCacheManager;

    private final Random random = new Random();

    /**
     * Mitigation 1: jitter each TTL by up to ±20% so keys written together do
     * not all expire at the same instant (the classic avalanche trigger).
     */
    public void cacheWithRandomExpiry(String key, Object value, long baseExpireSeconds) {
        // nextDouble()*2-1 is uniform in [-1, 1), giving an offset within ±20% of the base
        long randomOffset = (long) (baseExpireSeconds * 0.2 * (random.nextDouble() * 2 - 1));
        long actualExpire = baseExpireSeconds + randomOffset;

        redisTemplate.opsForValue().set(key, value, actualExpire, TimeUnit.SECONDS);
    }

    /**
     * Mitigation 2: tiered lookup — local cache, then Redis, then the database —
     * degrading gracefully when Redis is unavailable.
     */
    public <T> T getWithFallback(String key, Class<T> type, Supplier<T> dbLoader) {
        // tier 1: in-process cache
        Cache localCache = localCacheManager.getCache("local");
        Cache.ValueWrapper localValue = localCache.get(key);
        if (localValue != null) {
            return type.cast(localValue.get());
        }

        // tier 2: Redis
        try {
            Object redisValue = redisTemplate.opsForValue().get(key);
            if (redisValue != null) {
                localCache.put(key, redisValue); // back-fill the local cache
                return type.cast(redisValue);
            }
        } catch (Exception e) {
            // Redis down: fall through to the database
            logger.warn("Redis 不可用,降级处理", e);
        }

        // tier 3: database
        T dbValue = dbLoader.get();
        if (dbValue != null) {
            localCache.put(key, dbValue);
            try {
                cacheWithRandomExpiry(key, dbValue, 3600);
            } catch (Exception ignored) {
                // a failed Redis write must not fail the request
            }
        }
        return dbValue;
    }

    /** Mitigation 3: circuit breaker (Resilience4j) around the Redis read. */
    @CircuitBreaker(name = "cacheService", fallbackMethod = "cacheFallback")
    public Object getFromCache(String key) {
        return redisTemplate.opsForValue().get(key);
    }

    /** Fallback when the breaker is open: serve from the local cache or null. */
    public Object cacheFallback(String key, Throwable t) {
        logger.warn("缓存熔断降级,key={}", key);
        Cache localCache = localCacheManager.getCache("local");
        Cache.ValueWrapper value = localCache.get(key);
        return value != null ? value.get() : null;
    }
}

缓存击穿解决方案

@Service
public class CacheBreakdownSolution {

    // FIX: bound the wait-and-retry that was previously unbounded recursion.
    private static final int MAX_RETRIES = 50;
    private static final long RETRY_BACKOFF_MS = 100;

    // FIX: these dependencies were used below but never declared/injected.
    @Autowired
    private CacheService cacheService;

    @Autowired
    private UserRepository userRepository;

    @Autowired
    private RedisTemplate<String, Object> redisTemplate;

    /**
     * Mutex pattern against cache breakdown (a hot key expiring and every
     * request stampeding the DB): only the lock holder reloads; other callers
     * back off briefly and re-check the cache.
     * FIX: the original recursed after Thread.sleep (unbounded stack growth) and
     * did not handle InterruptedException, so it did not even compile.
     */
    public User getUserWithMutexLock(Long id) {
        String lockKey = "lock:user:" + id;

        for (int attempt = 0; attempt < MAX_RETRIES; attempt++) {
            // fast path: value already cached
            User user = cacheService.getUserById(id);
            if (user != null) {
                return user;
            }

            if (tryLock(lockKey)) {
                try {
                    // double-check under the lock
                    user = cacheService.getUserById(id);
                    if (user != null) {
                        return user;
                    }

                    user = userRepository.findById(id).orElse(null);
                    if (user != null) {
                        cacheService.cacheUser(user);
                    }
                    return user;
                } finally {
                    releaseLock(lockKey);
                }
            }

            // lock held elsewhere: wait for the holder to repopulate the cache
            try {
                Thread.sleep(RETRY_BACKOFF_MS);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // preserve interrupt status
                return null;
            }
        }

        // bounded retries exhausted: fall back to the database directly
        return userRepository.findById(id).orElse(null);
    }

    /** Best-effort distributed lock: SET NX with a 30 s safety TTL. */
    private boolean tryLock(String lockKey) {
        // FIX: setIfAbsent returns Boolean, which may be null (e.g. inside a
        // pipeline/transaction); avoid the unboxing NPE.
        return Boolean.TRUE.equals(
            redisTemplate.opsForValue().setIfAbsent(lockKey, "locked", 30, TimeUnit.SECONDS));
    }

    /**
     * NOTE(review): an unconditional DELETE can release a lock that another
     * client re-acquired after this one's TTL expired; a unique token plus a
     * compare-and-delete Lua script is the robust form.
     */
    private void releaseLock(String lockKey) {
        redisTemplate.delete(lockKey);
    }
}

3. 缓存预热策略

@Component
public class CacheWarmUpService {

    private static final Logger logger = LoggerFactory.getLogger(CacheWarmUpService.class);

    @Autowired
    private UserRepository userRepository;

    @Autowired
    private ProductRepository productRepository;

    // FIX: configRepository was used in warmUpConfigData but never declared/injected.
    @Autowired
    private ConfigRepository configRepository;

    @Autowired
    private CacheManager cacheManager;

    @Autowired
    private RedisTemplate<String, Object> redisTemplate;

    /** Strategy 1: warm the caches in parallel once the application is ready. */
    @EventListener(ApplicationReadyEvent.class)
    public void warmUpOnStartup() {
        logger.info("开始缓存预热...");
        long start = System.currentTimeMillis();

        // NOTE(review): runAsync uses ForkJoinPool.commonPool; pass a dedicated
        // executor if warm-up must not compete with other async work.
        CompletableFuture.allOf(
            CompletableFuture.runAsync(this::warmUpHotUsers),
            CompletableFuture.runAsync(this::warmUpHotProducts),
            CompletableFuture.runAsync(this::warmUpConfigData)
        ).join();

        logger.info("缓存预热完成,耗时 {}ms", System.currentTimeMillis() - start);
    }

    /** Preloads the 1000 most-active users into the "users" cache. */
    private void warmUpHotUsers() {
        List<User> hotUsers = userRepository.findTop1000ByOrderByLoginCountDesc();
        Cache userCache = cacheManager.getCache("users");
        if (userCache == null) { // FIX: getCache may return null for an unknown name
            return;
        }
        for (User user : hotUsers) {
            userCache.put(user.getId(), user);
        }
        logger.info("用户缓存预热完成,共 {} 条", hotUsers.size());
    }

    /** Preloads the 500 best-selling products into the "products" cache. */
    private void warmUpHotProducts() {
        List<Product> hotProducts = productRepository.findTop500ByOrderBySalesDesc();
        Cache productCache = cacheManager.getCache("products");
        if (productCache == null) {
            return;
        }
        for (Product product : hotProducts) {
            productCache.put(product.getId(), product);
        }
        logger.info("商品缓存预热完成,共 {} 条", hotProducts.size());
    }

    /** Preloads system configuration / dictionary data into a Redis hash. */
    private void warmUpConfigData() {
        Map<String, String> configs = configRepository.findAllAsMap();
        redisTemplate.opsForHash().putAll("sys:config", configs);
        logger.info("配置缓存预热完成,共 {} 条", configs.size());
    }

    /** Strategy 2: every 5 minutes, refresh hot entries that are about to expire. */
    @Scheduled(fixedRate = 300000)
    public void scheduledWarmUp() {
        // NOTE(review): other code in this file caches users under "user:<id>"
        // while this scans "users:*" — confirm which key prefix is actually used.
        Set<String> expiringKeys = findExpiringKeys("users:*", 60); // expiring within 60 s
        Cache userCache = cacheManager.getCache("users");
        if (userCache == null) {
            return;
        }
        for (String key : expiringKeys) {
            String id = key.split(":")[1];
            User user = userRepository.findById(Long.parseLong(id)).orElse(null);
            if (user != null) {
                userCache.put(user.getId(), user);
            }
        }
    }

    /**
     * Returns the keys matching {@code pattern} whose remaining TTL is positive
     * and below {@code thresholdSeconds}.
     * NOTE(review): KEYS is O(N) and blocks Redis; switch to SCAN for production
     * data sets.
     */
    private Set<String> findExpiringKeys(String pattern, long thresholdSeconds) {
        Set<String> keys = redisTemplate.keys(pattern);
        Set<String> expiringKeys = new HashSet<>();

        if (keys != null) {
            for (String key : keys) {
                Long ttl = redisTemplate.getExpire(key, TimeUnit.SECONDS);
                if (ttl != null && ttl > 0 && ttl < thresholdSeconds) {
                    expiringKeys.add(key);
                }
            }
        }
        return expiringKeys;
    }
}