目的:打算将一个访问量不大的项目的每个操作执行的sql存储于日志文件中,方便拷贝sql进行维护。
数据库连接池使用druid,数据库操作使用springjdbc+jpa,日志系统使用slf4j+logback。
因druid的sql语句输出级别是DEBUG,且slf4j不支持日志级别修改,所以只能将druid产生的DEBUG级别日志输出,这样输出级别就要降低为DEBUG,就不能随意写DEBUG级别的日志了。
而且会输出双份日志且将语句粘出来执行时需要手动将一个个参数匹配到对应占位符上,操作不便。
故新建一个Slf4jLogFilter给druid,设置statementExecutableSqlLogEnable属性为true
这样输出的sql日志仅有一条,会将参数自动赋值给占位符并可以格式化sql.可谓一举多得。
druid-configuration如下:
import com.alibaba.druid.filter.logging.Slf4jLogFilter;
import com.alibaba.druid.pool.DruidDataSource;
import lombok.Data;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.jdbc.datasource.DataSourceTransactionManager;
import org.springframework.transaction.PlatformTransactionManager;
import org.springframework.transaction.annotation.EnableTransactionManagement;
import javax.sql.DataSource;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
/**
 * Druid connection-pool configuration.
 *
 * <p>Binds every {@code spring.datasource.*} property onto this class (via Lombok
 * {@code @Data} setters) and builds a {@link DruidDataSource} from them. A
 * programmatic {@link Slf4jLogFilter} is attached so each executed SQL statement is
 * logged once, with parameters already substituted into their placeholders.
 *
 * @author lance
 */
@Data
@ConfigurationProperties(prefix = "spring.datasource")
@Configuration
@EnableTransactionManagement
public class DruidConfig
{
    // Connection settings bound from spring.datasource.*
    private String url;
    private String username;
    private String password;
    private String driverClassName;

    // Pool sizing / eviction settings
    private int initialSize;
    private int minIdle;
    private int maxActive;
    private int maxWait;
    private int timeBetweenEvictionRunsMillis;
    private int minEvictableIdleTimeMillis;

    // Connection validation settings
    private String validationQuery;
    private boolean testWhileIdle;
    private boolean testOnBorrow;
    private boolean testOnReturn;

    // PreparedStatement cache settings
    private boolean poolPreparedStatements;
    private int maxPoolPreparedStatementPerConnectionSize;

    // Extra druid filters (comma-separated names, e.g. "stat") and raw
    // connection properties passed through to the driver.
    private String filters;
    private String connectionProperties;

    /**
     * Builds the pooled {@link DataSource} used by the whole application.
     *
     * @return the fully configured Druid data source
     * @throws IllegalStateException if the {@code filters} property cannot be parsed
     */
    @Bean
    public DataSource dataSource()
    {
        DruidDataSource datasource = new DruidDataSource();
        datasource.setUrl(url);
        datasource.setUsername(username);
        datasource.setPassword(password);
        datasource.setDriverClassName(driverClassName);
        datasource.setInitialSize(initialSize);
        datasource.setMinIdle(minIdle);
        datasource.setMaxActive(maxActive);
        datasource.setMaxWait(maxWait);
        datasource.setTimeBetweenEvictionRunsMillis(timeBetweenEvictionRunsMillis);
        datasource.setMinEvictableIdleTimeMillis(minEvictableIdleTimeMillis);
        datasource.setValidationQuery(validationQuery);
        datasource.setTestWhileIdle(testWhileIdle);
        datasource.setTestOnBorrow(testOnBorrow);
        datasource.setTestOnReturn(testOnReturn);
        datasource.setPoolPreparedStatements(poolPreparedStatements);
        datasource.setMaxPoolPreparedStatementPerConnectionSize(maxPoolPreparedStatementPerConnectionSize);
        // Append to druid's own (typed) filter list instead of building a raw
        // java.util.List - this removes the unchecked setProxyFilters() call and
        // the class-level @SuppressWarnings("unchecked") it required.
        datasource.getProxyFilters().add(logFilter());
        try
        {
            datasource.setFilters(filters);
        }
        catch (SQLException e)
        {
            // A malformed 'filters' property is a startup configuration error:
            // fail fast (with the original cause preserved) rather than printing
            // to stderr and silently running without the requested filters.
            throw new IllegalStateException(
                    "druid configuration initialization filter: " + filters, e);
        }
        datasource.setConnectionProperties(connectionProperties);
        return datasource;
    }

    /**
     * Transaction manager backed by the Druid data source above.
     *
     * @return the platform transaction manager for @Transactional support
     * @throws SQLException declared for signature compatibility; not thrown here
     */
    @Bean
    public PlatformTransactionManager transactionManager() throws SQLException
    {
        return new DataSourceTransactionManager(dataSource());
    }

    /**
     * SQL log filter: logs one line per statement with parameters substituted
     * into the placeholders (statementExecutableSqlLogEnable), while suppressing
     * the noisier connection/statement lifecycle logs.
     *
     * @return the configured slf4j log filter
     */
    @Bean
    public Slf4jLogFilter logFilter()
    {
        Slf4jLogFilter slf4jLogFilter = new Slf4jLogFilter();
        slf4jLogFilter.setConnectionLogEnabled(false);
        slf4jLogFilter.setStatementLogEnabled(false);
        slf4jLogFilter.setResultSetLogEnabled(true);
        slf4jLogFilter.setStatementExecutableSqlLogEnable(true);
        return slf4jLogFilter;
    }
}
日志设置为:warn和error级别的日志再另作输出,这样可以不时看一下warn或者error级别日志文件是否有信息,若存在日志则查看问题的时候去warn或者error文件中找到错误行去主日志文件中搜索就ok了,方便快捷,logback-spring.xml如下。
<?xml version="1.0" encoding="UTF-8"?>
<!-- scan="true": logback re-reads this file every 30s, so level/appender changes
     apply without restarting the application. -->
<configuration scan="true" scanPeriod="30 seconds" debug="false">
<!-- Pulls in Spring Boot's defaults, which define the CONSOLE appender referenced
     below. NOTE(review): base.xml also defines and attaches a FILE appender
     (spring.log), so file output may be duplicated there - confirm at runtime. -->
<include resource="org/springframework/boot/logging/logback/base.xml"/>
<!-- log files path -->
<property name="log.path" value="/logGroup/dataApi"/>
<!-- Main log file: receives everything at root level (INFO) and above.
     %X{logId} pulls a per-request id from the MDC (empty if not set). -->
<!-- logfile -->
<appender name="LOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.path}/info.log</file>
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS}-[%-5p]|%X{logId}|%-40.40logger{39}: %m%n</pattern>
<charset>utf-8</charset>
</encoder>
<!-- Rolls daily AND at 60MB per file; keeps 31 days, capped at 10GB total. -->
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${log.path}/backup-info.%d{yyyy-MM-dd}.%i.zip</fileNamePattern>
<maxFileSize>60MB</maxFileSize>
<maxHistory>31</maxHistory>
<totalSizeCap>10GB</totalSizeCap>
</rollingPolicy>
</appender>
<!-- warn.log: LevelFilter accepts exactly WARN and denies everything else,
     so this file contains only WARN events (ERROR goes to error.log). -->
<!-- warn -->
<appender name="WARN" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.path}/warn.log</file>
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS}-[%-5p]|%X{logId}|%-40.40logger{39}: %m%n</pattern>
<charset>utf-8</charset>
</encoder>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${log.path}/backup-warn.%d{yyyy-MM-dd}.%i.zip</fileNamePattern>
<maxFileSize>60MB</maxFileSize>
<maxHistory>31</maxHistory>
<totalSizeCap>10GB</totalSizeCap>
</rollingPolicy>
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>WARN</level>
<onMatch>ACCEPT</onMatch>
<onMismatch>DENY</onMismatch>
</filter>
</appender>
<!-- error.log: same pattern, accepts exactly ERROR. -->
<!-- error -->
<appender name="ERROR" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.path}/error.log</file>
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS}-[%-5p]|%X{logId}|%-40.40logger{39}: %m%n</pattern>
<charset>utf-8</charset>
</encoder>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${log.path}/backup-error.%d{yyyy-MM-dd}.%i.zip</fileNamePattern>
<maxFileSize>60MB</maxFileSize>
<maxHistory>31</maxHistory>
<totalSizeCap>10GB</totalSizeCap>
</rollingPolicy>
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>ERROR</level>
<onMatch>ACCEPT</onMatch>
<onMismatch>DENY</onMismatch>
</filter>
</appender>
<!-- global log level -->
<root level="INFO">
<appender-ref ref="CONSOLE"/>
<appender-ref ref="LOGFILE"/>
<appender-ref ref="WARN"/>
<appender-ref ref="ERROR"/>
</root>
<!-- Quiet down Spring framework internals to WARN+ only. -->
<logger name="org.springframework" level="WARN"/>
</configuration>
但是!因为项目中使用的oracle的程序包package,而druid无法格式化package...就会输出很长一串的日志,看着闹心。
于是做调整。自定义日志过滤器。刚开始的时候,自定义了一个日志过滤器,如下:
import ch.qos.logback.classic.Level;
import ch.qos.logback.classic.spi.ILoggingEvent;
import ch.qos.logback.core.filter.Filter;
import ch.qos.logback.core.spi.FilterReply;
/**
 * Logback filter that drops druid's WARN/ERROR noise (e.g. SQLUtils failing to
 * format Oracle package calls, StatFilter slow-sql warnings) while leaving all
 * other events for the rest of the filter chain to decide.
 *
 * <p>Non-matching events return {@link FilterReply#NEUTRAL}, not {@code ACCEPT}:
 * ACCEPT accepts the event immediately and skips every filter placed after this
 * one in the appender, so a subsequent LevelFilter would never run - the
 * "only the first filter works" symptom. NEUTRAL simply delegates to the next
 * filter (and logs the event if it is the last filter), so behavior as a lone
 * filter is unchanged.
 */
public class LogBackFilter extends Filter<ILoggingEvent>
{
    @Override
    public FilterReply decide(ILoggingEvent event)
    {
        if (event.getLevel() == Level.WARN || event.getLevel() == Level.ERROR)
        {
            String loggerName = event.getLoggerName();
            if ("com.alibaba.druid.sql.SQLUtils".equals(loggerName)
                    || "com.alibaba.druid.filter.stat.StatFilter".equals(loggerName))
            {
                return FilterReply.DENY;
            }
        }
        // NEUTRAL lets later filters in the chain make their own decision;
        // ACCEPT would short-circuit and bypass them entirely.
        return FilterReply.NEUTRAL;
    }
}
然后在每个appender节点下加入此filter,例如:
<filter class="com.lance.api.common.config.LogBackFilter"/>
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>WARN</level>
<onMatch>ACCEPT</onMatch>
<onMismatch>DENY</onMismatch>
</filter>
但是!多次测试发现,多个filter时只有第一个生效。原因在于自定义filter对不匹配的日志返回了ACCEPT:在logback的filter链中,ACCEPT会立即接受该日志并跳过后续所有filter,只有返回NEUTRAL才会把决定权交给下一个filter。这就坑了:如果自定义filter放在级别filter之前,那每个日志文件的内容都一样了;如果放在之后,则自定义filter就无效了。
所以最后只能每个级别的appender下排除原级别filter完全使用自定义的filter,在里面做日志级别输出控制,可以实现高度定制化,也不错~
最终,logback-spring.xml如下:
<?xml version="1.0" encoding="UTF-8"?>
<!-- Final version: each appender uses a single custom filter class that both
     enforces the appender's level and suppresses druid's unformattable-package
     noise, because chaining a custom filter with LevelFilter did not behave as
     expected here. -->
<configuration scan="true" scanPeriod="30 seconds" debug="false">
<include resource="org/springframework/boot/logging/logback/base.xml"/>
<!-- log files path -->
<property name="log.path" value="/logGroup/dataApi"/>
<!-- Main log file; LogBackInfoFilter presumably passes INFO+ minus druid noise
     - confirm against the filter implementation. -->
<!-- logfile -->
<appender name="LOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.path}/info.log</file>
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS}-[%-5p]|%X{logId}|%-40.40logger{39}: %m%n</pattern>
<charset>utf-8</charset>
</encoder>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${log.path}/backup-info.%d{yyyy-MM-dd}.%i.zip</fileNamePattern>
<maxFileSize>60MB</maxFileSize>
<maxHistory>31</maxHistory>
<totalSizeCap>10GB</totalSizeCap>
</rollingPolicy>
<filter class="com.lance.common.config.logback.LogBackInfoFilter"/>
</appender>
<!-- warn.log: LogBackWarnFilter replaces the WARN LevelFilter AND drops the
     druid loggers - one filter per appender avoids the chaining problem. -->
<!-- warn -->
<appender name="WARN" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.path}/warn.log</file>
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS}-[%-5p]|%X{logId}|%-40.40logger{39}: %m%n</pattern>
<charset>utf-8</charset>
</encoder>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${log.path}/backup-warn.%d{yyyy-MM-dd}.%i.zip</fileNamePattern>
<maxFileSize>60MB</maxFileSize>
<maxHistory>31</maxHistory>
<totalSizeCap>10GB</totalSizeCap>
</rollingPolicy>
<filter class="com.lance.common.config.logback.LogBackWarnFilter"/>
</appender>
<!-- error.log: LogBackErrFilter plays the same role for ERROR events. -->
<!-- error -->
<appender name="ERROR" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.path}/error.log</file>
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS}-[%-5p]|%X{logId}|%-40.40logger{39}: %m%n</pattern>
<charset>utf-8</charset>
</encoder>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${log.path}/backup-error.%d{yyyy-MM-dd}.%i.zip</fileNamePattern>
<maxFileSize>60MB</maxFileSize>
<maxHistory>31</maxHistory>
<totalSizeCap>10GB</totalSizeCap>
</rollingPolicy>
<filter class="com.lance.common.config.logback.LogBackErrFilter"/>
</appender>
<!-- global log level -->
<root level="INFO">
<appender-ref ref="CONSOLE"/>
<appender-ref ref="LOGFILE"/>
<appender-ref ref="WARN"/>
<appender-ref ref="ERROR"/>
</root>
<logger name="org.springframework" level="WARN"/>
</configuration>
代码示例见:
https://blog.csdn.net/sinat_30637097/article/details/87920779