Back-Office Admin System (Part 2): SQL Monitoring
pom.xml
<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-jdbc</artifactId>
</dependency>
<dependency>
    <groupId>mysql</groupId>
    <artifactId>mysql-connector-java</artifactId>
    <scope>runtime</scope>
</dependency>
<dependency>
    <groupId>com.alibaba</groupId>
    <artifactId>druid</artifactId>
    <version>1.1.17</version>
</dependency>
<dependency>
    <groupId>org.projectlombok</groupId>
    <artifactId>lombok</artifactId>
    <optional>true</optional>
</dependency>
<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-configuration-processor</artifactId>
    <optional>true</optional>
</dependency>
<dependency>
    <groupId>org.codehaus.jackson</groupId>
    <artifactId>jackson-mapper-asl</artifactId>
    <version>1.9.12</version>
</dependency>
<!-- Utility classes -->
<dependency>
    <groupId>org.apache.commons</groupId>
    <artifactId>commons-lang3</artifactId>
    <version>3.5</version>
</dependency>
Modify the application.yml file (the configuration below is written in YAML)
server:
  port: 7001
spring:
  datasource:
    name: druidDataSource
    type: com.alibaba.druid.pool.DruidDataSource
    druid:
      driver-class-name: com.mysql.cj.jdbc.Driver
      url: jdbc:mysql://localhost:3306/qx?useUnicode=true&characterEncoding=utf8&serverTimezone=GMT%2B8&useSSL=false
      username: root
      password: root
      filters: stat,wall,slf4j,config  # monitoring/statistics filters; without them the SQL panel collects no data; "wall" is the firewall filter
      max-active: 100                  # maximum number of connections
      initial-size: 1                  # initial pool size
      max-wait: 60000                  # maximum wait time (ms) when acquiring a connection
      min-idle: 1                      # minimum number of connections
      time-between-eviction-runs-millis: 60000   # interval (ms) between checks for idle connections that should be closed
      min-evictable-idle-time-millis: 300000     # minimum time (ms) a connection must stay idle in the pool before it can be evicted
      validation-query: select 'x'
      test-while-idle: true
      test-on-borrow: false
      test-on-return: false
      pool-prepared-statements: true
      max-pool-prepared-statement-per-connection-size: 20
logging:
  config: classpath:logback.xml
#  level:
#    com.springframe.festmon.dao: trace
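The settings above are shown in YAML. If your project keeps them in application.properties instead, the same keys map directly to properties syntax thanks to Spring Boot's relaxed binding; a partial sketch (the remaining pool options follow the same spring.datasource.druid.* prefix):

server.port=7001
spring.datasource.name=druidDataSource
spring.datasource.type=com.alibaba.druid.pool.DruidDataSource
spring.datasource.druid.driver-class-name=com.mysql.cj.jdbc.Driver
spring.datasource.druid.url=jdbc:mysql://localhost:3306/qx?useUnicode=true&characterEncoding=utf8&serverTimezone=GMT%2B8&useSSL=false
spring.datasource.druid.username=root
spring.datasource.druid.password=root
spring.datasource.druid.filters=stat,wall,slf4j,config
spring.datasource.druid.max-active=100
spring.datasource.druid.initial-size=1
logging.config=classpath:logback.xml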
DruidConfig.java
import com.alibaba.druid.pool.DruidDataSource;
import com.alibaba.druid.support.http.StatViewServlet;
import com.alibaba.druid.support.http.WebStatFilter;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.boot.web.servlet.FilterRegistrationBean;
import org.springframework.boot.web.servlet.ServletRegistrationBean;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import javax.servlet.Filter;
import javax.servlet.Servlet;
import javax.sql.DataSource;
import java.sql.SQLException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

@Configuration
// @EnableConfigurationProperties imports the custom Druid properties class (DruidDataSourceProperties, shown below).
@EnableConfigurationProperties({DruidDataSourceProperties.class})
public class DruidConfig {

    @Autowired
    private DruidDataSourceProperties properties;

    @Bean
    @ConditionalOnMissingBean
    public DataSource druidDataSource() {
        DruidDataSource druidDataSource = new DruidDataSource();
        druidDataSource.setDriverClassName(properties.getDriverClassName());
        druidDataSource.setUrl(properties.getUrl());
        druidDataSource.setUsername(properties.getUsername());
        druidDataSource.setPassword(properties.getPassword());
        druidDataSource.setInitialSize(properties.getInitialSize());
        druidDataSource.setMinIdle(properties.getMinIdle());
        druidDataSource.setMaxActive(properties.getMaxActive());
        druidDataSource.setMaxWait(properties.getMaxWait());
        druidDataSource.setTimeBetweenEvictionRunsMillis(properties.getTimeBetweenEvictionRunsMillis());
        druidDataSource.setMinEvictableIdleTimeMillis(properties.getMinEvictableIdleTimeMillis());
        druidDataSource.setValidationQuery(properties.getValidationQuery());
        druidDataSource.setTestWhileIdle(properties.isTestWhileIdle());
        druidDataSource.setTestOnBorrow(properties.isTestOnBorrow());
        druidDataSource.setTestOnReturn(properties.isTestOnReturn());
        druidDataSource.setPoolPreparedStatements(properties.isPoolPreparedStatements());
        druidDataSource.setMaxPoolPreparedStatementPerConnectionSize(properties.getMaxPoolPreparedStatementPerConnectionSize());

        try {
            druidDataSource.setFilters(properties.getFilters());
            druidDataSource.init();
        } catch (SQLException e) {
            e.printStackTrace();
        }
        return druidDataSource;
    }

    /**
     * Registers the Servlet for the Druid monitoring console.
     * With an embedded servlet container there is no web.xml, so the Servlet is registered the Spring Boot way;
     * this ServletRegistrationBean is the equivalent of a @WebServlet configuration.
     */
    @Bean
    @ConditionalOnMissingBean
    public ServletRegistrationBean<Servlet> druidServlet() {
        ServletRegistrationBean<Servlet> servletRegistrationBean =
                new ServletRegistrationBean<Servlet>(new StatViewServlet(), "/druid/*");
        // Whitelist: only this host may access the console; empty or null means everyone is allowed.
        servletRegistrationBean.addInitParameter("allow", "127.0.0.1");
        // IP blacklist (when both are present, deny takes precedence over allow).
        // A denied client sees: "sorry, you are not permitted to view this page".
        servletRegistrationBean.addInitParameter("deny", "172.13.13.31");
        // Username and password for logging in to the Druid monitoring console.
        servletRegistrationBean.addInitParameter("loginUsername", "admin");
        servletRegistrationBean.addInitParameter("loginPassword", "admin");
        // Whether statistics can be reset from the console.
        servletRegistrationBean.addInitParameter("resetEnable", "true");
        return servletRegistrationBean;
    }

    /**
     * Registers the Druid web-monitoring filter.
     * WebStatFilter links web requests to the Druid data source so they can be monitored and aggregated;
     * this FilterRegistrationBean is the equivalent of a web Filter configuration.
     */
    @Bean
    @ConditionalOnMissingBean
    public FilterRegistrationBean<Filter> filterRegistrationBean() {
        FilterRegistrationBean<Filter> bean = new FilterRegistrationBean<Filter>();
        bean.setFilter(new WebStatFilter());
        // exclusions: requests matching these patterns are excluded from the statistics.
        Map<String, String> initParams = new HashMap<>();
        initParams.put("exclusions", "*.js,*.css,/druid/*");
        bean.setInitParameters(initParams);
        // "/*" means every request passes through the filter.
        bean.setUrlPatterns(Collections.singletonList("/*"));
        return bean;
    }
}
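With this configuration in place, starting the application and opening http://localhost:7001/druid/ (port 7001 from the YAML above, path /druid/* from the Servlet registration) brings up the Druid console; log in with the admin/admin credentials configured above to view the SQL, URI, and wall (firewall) statistics.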
DruidDataSourceProperties.java
import org.springframework.boot.context.properties.ConfigurationProperties;

@ConfigurationProperties(prefix = "spring.datasource.druid") // prefix under which the configuration properties are bound
public class DruidDataSourceProperties {

    // jdbc
    private String driverClassName;
    private String url;
    private String username;
    private String password;

    // jdbc connection pool
    private int initialSize;
    private int minIdle;
    private int maxActive = 100;
    private long maxWait;
    private long timeBetweenEvictionRunsMillis;
    private long minEvictableIdleTimeMillis;
    private String validationQuery;
    private boolean testWhileIdle;
    private boolean testOnBorrow;
    private boolean testOnReturn;
    private boolean poolPreparedStatements;
    private int maxPoolPreparedStatementPerConnectionSize;

    // filter
    private String filters;

    public String getDriverClassName() { return driverClassName; }
    public void setDriverClassName(String driverClassName) { this.driverClassName = driverClassName; }
    public String getUrl() { return url; }
    public void setUrl(String url) { this.url = url; }
    public String getUsername() { return username; }
    public void setUsername(String username) { this.username = username; }
    public String getPassword() { return password; }
    public void setPassword(String password) { this.password = password; }
    public int getInitialSize() { return initialSize; }
    public void setInitialSize(int initialSize) { this.initialSize = initialSize; }
    public int getMinIdle() { return minIdle; }
    public void setMinIdle(int minIdle) { this.minIdle = minIdle; }
    public int getMaxActive() { return maxActive; }
    public void setMaxActive(int maxActive) { this.maxActive = maxActive; }
    public long getMaxWait() { return maxWait; }
    public void setMaxWait(long maxWait) { this.maxWait = maxWait; }
    public long getTimeBetweenEvictionRunsMillis() { return timeBetweenEvictionRunsMillis; }
    public void setTimeBetweenEvictionRunsMillis(long timeBetweenEvictionRunsMillis) { this.timeBetweenEvictionRunsMillis = timeBetweenEvictionRunsMillis; }
    public long getMinEvictableIdleTimeMillis() { return minEvictableIdleTimeMillis; }
    public void setMinEvictableIdleTimeMillis(long minEvictableIdleTimeMillis) { this.minEvictableIdleTimeMillis = minEvictableIdleTimeMillis; }
    public String getValidationQuery() { return validationQuery; }
    public void setValidationQuery(String validationQuery) { this.validationQuery = validationQuery; }
    public boolean isTestWhileIdle() { return testWhileIdle; }
    public void setTestWhileIdle(boolean testWhileIdle) { this.testWhileIdle = testWhileIdle; }
    public boolean isTestOnBorrow() { return testOnBorrow; }
    public void setTestOnBorrow(boolean testOnBorrow) { this.testOnBorrow = testOnBorrow; }
    public boolean isTestOnReturn() { return testOnReturn; }
    public void setTestOnReturn(boolean testOnReturn) { this.testOnReturn = testOnReturn; }
    public boolean isPoolPreparedStatements() { return poolPreparedStatements; }
    public void setPoolPreparedStatements(boolean poolPreparedStatements) { this.poolPreparedStatements = poolPreparedStatements; }
    public int getMaxPoolPreparedStatementPerConnectionSize() { return maxPoolPreparedStatementPerConnectionSize; }
    public void setMaxPoolPreparedStatementPerConnectionSize(int maxPoolPreparedStatementPerConnectionSize) { this.maxPoolPreparedStatementPerConnectionSize = maxPoolPreparedStatementPerConnectionSize; }
    public String getFilters() { return filters; }
    public void setFilters(String filters) { this.filters = filters; }
}
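Since Lombok is already declared in the pom.xml, the same properties class can be written more compactly; a sketch of a behavior-equivalent variant, assuming you are happy to let Lombok's @Data generate the accessors instead of writing them by hand:

import lombok.Data;
import org.springframework.boot.context.properties.ConfigurationProperties;

// Behavior-equivalent variant of DruidDataSourceProperties using Lombok (already on the classpath).
// @Data generates the getters and setters that the hand-written version above spells out.
@Data
@ConfigurationProperties(prefix = "spring.datasource.druid")
public class DruidDataSourceProperties {
    // jdbc
    private String driverClassName;
    private String url;
    private String username;
    private String password;
    // jdbc connection pool
    private int initialSize;
    private int minIdle;
    private int maxActive = 100;
    private long maxWait;
    private long timeBetweenEvictionRunsMillis;
    private long minEvictableIdleTimeMillis;
    private String validationQuery;
    private boolean testWhileIdle;
    private boolean testOnBorrow;
    private boolean testOnReturn;
    private boolean poolPreparedStatements;
    private int maxPoolPreparedStatementPerConnectionSize;
    // filter
    private String filters;
}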
Configure log management
Create a new logback.xml file
Adjust the value of LOG_HOME to suit your project. In a development environment it is best to keep the file appenders commented out and log to the console, which makes problems easier to locate; in production, comment out the console appender instead and switch to the file appenders (see the sketch after the configuration below).
logback.xml
<?xml version="1.0" encoding="UTF-8"?>
<!-- dev env -->
<configuration>
    <!--
        fileError handles the error level; files are named log-error-xxx.log
        fileWarn handles the warn level; files are named log-warn-xxx.log
        fileInfo handles the info level; files are named log-info-xxx.log
        STDOUT writes log output to the console, which is convenient during development and testing
    -->
    <contextName>data_server</contextName>
    <property name="LOG_HOME" value="wyq_log" />
    <property name="log.maxHistory" value="1" />
    <property name="log.level" value="info" />

    <appender name="fileError" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <!-- Path and name of the log file currently being written -->
        <file>${LOG_HOME}/log_error.log</file>
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME}/error/log-error-%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <maxHistory>${log.maxHistory}</maxHistory>
            <!-- A log file may not exceed 100MB; once it does, a new file is started and the %i index counts up from 0 -->
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <maxFileSize>100MB</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
        </rollingPolicy>
        <!-- Append to the log file -->
        <append>true</append>
        <!-- Log line format -->
        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
            <pattern>%d{yyyy-MM-dd HH:mm:ss} [%thread] %-5level %logger{50} - %msg%n</pattern>
            <charset>utf-8</charset>
        </encoder>
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>error</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
    </appender>

    <appender name="fileWarn" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_HOME}/log_warn.log</file>
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME}/warn/log-warn-%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <maxHistory>${log.maxHistory}</maxHistory>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <maxFileSize>100MB</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
        </rollingPolicy>
        <append>true</append>
        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
            <pattern>%d{yyyy-MM-dd HH:mm:ss} [%thread] %-5level %logger{50} - %msg%n</pattern>
            <charset>utf-8</charset>
        </encoder>
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>warn</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
    </appender>

    <appender name="fileInfo" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_HOME}/log_info.log</file>
        <!-- Rolling policy: roll by date and by size -->
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME}/info/log-info-%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <maxHistory>${log.maxHistory}</maxHistory>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <maxFileSize>100MB</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
        </rollingPolicy>
        <append>true</append>
        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
            <pattern>%d{yyyy-MM-dd HH:mm:ss} [%thread] %-5level %logger{50} - %msg%n</pattern>
            <charset>utf-8</charset>
        </encoder>
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>info</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
    </appender>

    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
        <!-- encoder defaults to PatternLayoutEncoder -->
        <encoder>
            <pattern>%date{yyyy-MM-dd HH:mm:ss} | %highlight(%p) | %boldYellow(%c) | %M:%boldGreen(%L) | %m%n</pattern>
            <charset>utf-8</charset>
        </encoder>
    </appender>

    <logger name="org.springframework" level="${log.level}" />

    <!-- In production, set this level appropriately so the log files do not grow too large or hurt performance -->
    <root level="INFO">
        <!--<appender-ref ref="fileError" />
        <appender-ref ref="fileWarn" />
        <appender-ref ref="fileInfo" />-->
        <appender-ref ref="STDOUT" />
    </root>
</configuration>