{ "version": "https://jsonfeed.org/version/1", "title": "涛声依旧", "subtitle": "天下事有难易乎?为之,则难者亦易矣", "icon": "https://hitoli.com/images/favicon.ico", "description": "天生我材必有用", "home_page_url": "https://hitoli.com", "items": [ { "id": "https://hitoli.com/2024/05/10/MySQL%E8%A1%A8%E5%88%86%E5%8C%BA/", "url": "https://hitoli.com/2024/05/10/MySQL%E8%A1%A8%E5%88%86%E5%8C%BA/", "title": "MySQL表分区", "date_published": "2024-05-10T09:09:00.000Z", "content_html": "

# 简介

\n
\n

当单表数据量过大时,就需要考虑对表进行分表或者分区了。分表和分区都是用来解决数据库中大量数据存储和查询效率的问题,但它们的实现方式和解决的问题有所不同。

\n
\n
\n

分表(Sharding):

\n
\n\n
\n

分区(Partitioning):

\n
\n\n
\n

区别:

\n
\n\n
\n

本文章介绍的是如何对单表进行分区。

\n
\n
# 给表添加分区
\n

1
2
3
4
5
6
7
8
ALTER TABLE 表名
PARTITION BY RANGE COLUMNS (时间字段名) (
\t-- 分区条件(时间小于2022-02-01的数据放入p202201分区中)
PARTITION p202201 VALUES LESS THAN ('2022-02-01'),
PARTITION p202202 VALUES LESS THAN ('2022-03-01'),
PARTITION p202203 VALUES LESS THAN ('2022-04-01'),
-- 继续定义更多的分区...
);

\n
# 按指定表名创建当前年月的分区
\n

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
CREATE PROCEDURE create_monthly_partition(IN tableName VARCHAR(255))
BEGIN
DECLARE currentYear INT;
DECLARE currentMonth INT;
\tDECLARE nextYear INT;
DECLARE nextMonth INT;
DECLARE partitionName VARCHAR(255);
SET currentYear = YEAR(CURRENT_DATE);
SET currentMonth = MONTH(CURRENT_DATE);
\t-- 计算下一个月的年份和月份
IF currentMonth = 12 THEN
SET nextYear = currentYear + 1;
SET nextMonth = 1;
ELSE
SET nextYear = currentYear;
SET nextMonth = currentMonth + 1;
END IF;
SET partitionName = CONCAT('p', currentYear, LPAD(currentMonth, 2, 0));
SET @sql = CONCAT('ALTER TABLE ', tableName,
' ADD PARTITION (PARTITION ', partitionName,
' VALUES LESS THAN (\\'', nextYear, '-', LPAD(nextMonth, 2, 0), '-01\\'', '))');
PREPARE stmt FROM @sql;
EXECUTE stmt;
DEALLOCATE PREPARE stmt;
END

\n
# 调用创建新分区
\n

1
CALL create_monthly_partition('表名');

\n
# 删除指定分区
\n

1
ALTER TABLE 表名 drop PARTITION 分区名;

\n
# 每月执行一次调用指定表添加新分区
\n

1
2
3
4
5
6
7
8
9
10
11
12
13
14
CREATE EVENT create_monthly_partition_event
ON SCHEDULE EVERY 1 MONTH
STARTS '2024-01-01 00:00:00'
DO
BEGIN
-- 定义需要分区的表名列表
SET @tables = '表名1,表名2';
-- 遍历表名列表并为每个表创建分区
WHILE CHAR_LENGTH(@tables) > 0 DO
SET @tableName = SUBSTRING_INDEX(@tables, ',', 1);
SET @tables = SUBSTRING(@tables, CHAR_LENGTH(@tableName) + 2);
CALL create_monthly_partition(@tableName);
END WHILE;
END

\n", "tags": [ "工作", "解决问题", "mysql", "表分区" ] }, { "id": "https://hitoli.com/2024/04/30/Java%E5%90%8E%E7%AB%AF%E7%A6%81%E6%AD%A2%E6%8E%A5%E5%8F%A3%E7%9E%AC%E6%97%B6%E9%87%8D%E5%A4%8D%E8%B0%83%E7%94%A8/", "url": "https://hitoli.com/2024/04/30/Java%E5%90%8E%E7%AB%AF%E7%A6%81%E6%AD%A2%E6%8E%A5%E5%8F%A3%E7%9E%AC%E6%97%B6%E9%87%8D%E5%A4%8D%E8%B0%83%E7%94%A8/", "title": "Java后端禁止接口瞬时重复调用", "date_published": "2024-04-30T03:44:00.000Z", "content_html": "
# 简介
\n

由于前端会莫名其妙的对同一接口请求多次,从而占用后端资源造成浪费。所以采用了后端拦截相关重复请求的方案。此方案会将请求用户 id 加接口 url 加参数作为 key,请求时间作为 value,使用 ConcurrentHashMap 进行缓存。如果下次相同的请求和上次请求的时间在指定的范围内则认为此请求属于重复请求。

\n
# 自定义可重复读 Request
\n

request 的 body 只能读取一次,所以对其进行封装。
\n

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
package xxx.support;

import com.alibaba.fastjson.JSON;
import lombok.extern.slf4j.Slf4j;

import javax.servlet.ReadListener;
import javax.servlet.ServletInputStream;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletRequestWrapper;
import java.io.*;
import java.nio.charset.StandardCharsets;
import java.util.Enumeration;
import java.util.Map;
import java.util.Objects;
import java.util.TreeMap;

@Slf4j
public class RepeatableReadHttpServletRequestWrapper extends HttpServletRequestWrapper {

private final byte[] requestBody;

public RepeatableReadHttpServletRequestWrapper(HttpServletRequest request) throws IOException {
super(request);
this.requestBody = readRequestBody(request);
}

private byte[] readRequestBody(HttpServletRequest request) throws IOException {
try (InputStream inputStream = request.getInputStream();
ByteArrayOutputStream result = new ByteArrayOutputStream()) {

byte[] buffer = new byte[1024];
int length;
while ((length = inputStream.read(buffer)) != -1) {
result.write(buffer, 0, length);
}

return result.toByteArray();
}
}

@Override
public ServletInputStream getInputStream() throws IOException {
// 直接使用 ByteArrayInputStream,它提供可重复读取的输入流
return new ServletInputStream() {
private final ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream(requestBody);

@Override
public int read() throws IOException {
return byteArrayInputStream.read();
}

@Override
public boolean isFinished() {
return byteArrayInputStream.available() == 0;
}

@Override
public boolean isReady() {
return true;
}

@Override
public void setReadListener(ReadListener readListener) {
// 不需要实现,可以留空
}
};
}

@Override
public BufferedReader getReader() throws IOException {
// 使用 InputStreamReader 包装 ByteArrayInputStream,提供可重复读取的字符流
return new BufferedReader(new InputStreamReader(new ByteArrayInputStream(requestBody)));
}

/**
* 获取json格式的参数
* @return
*/
public String getParamsToJSONString() {
String jsonStr = "";
if ("POST".equals(this.getMethod().toUpperCase()) && this.isJsonRequest()) {
try {
jsonStr = this.readJsonData();
} catch (Exception e) {
log.error(e.getMessage());
}
} else {
Enumeration<String> parameterNames = this.getParameterNames();
if (Objects.nonNull(parameterNames) && parameterNames.hasMoreElements()) {
// 将参数排序后转为json
Map<String, String> paramsMap = new TreeMap<>();
while (parameterNames.hasMoreElements()) {
String paramName = parameterNames.nextElement();
paramsMap.put(paramName, this.getParameter(paramName));
}
jsonStr = JSON.toJSONString(paramsMap);
}
}
return jsonStr;
}

/**
* 判断是否json请求
* @return
*/
private boolean isJsonRequest() {
String contentType = this.getContentType();
return contentType != null && contentType.toLowerCase().contains("application/json");
}

/**
* 获取json格式的参数
* @return
* @throws IOException
*/
private String readJsonData() throws IOException {
return new String(this.readRequestBody(this), StandardCharsets.UTF_8);
}

}

\n
# 重复请求过滤器
\n

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
package xxx.filter;

import cn.hutool.core.collection.CollectionUtil;
import xxx.RepeatableReadHttpServletRequestWrapper;
import org.springframework.boot.actuate.endpoint.web.WebEndpointResponse;
import org.springframework.security.web.util.matcher.RequestMatcher;
import org.springframework.web.filter.OncePerRequestFilter;

import javax.servlet.FilterChain;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.ConcurrentHashMap;

public class DuplicateRequestFilter extends OncePerRequestFilter {
   // 是否启用
   private Boolean duplicateRequestFilter;
   // 间隔时间(毫秒)
   private Long intervalTime;
   // 清除缓存时间(毫秒)
private Long clearCachetime;
   // 放行url
   private List<RequestMatcher> permitAll;

public DuplicateRequestFilter(Boolean duplicateRequestFilter, List<RequestMatcher> permitAll, Long intervalTime,
Long clearCachetime) {
this.duplicateRequestFilter = duplicateRequestFilter;
this.permitAll = permitAll;
this.intervalTime = intervalTime;
this.clearCachetime = clearCachetime;
}

// 存储参数和请求时间
private Map<String, Long> requestCache = new ConcurrentHashMap<>();

@Override
protected void doFilterInternal(HttpServletRequest request, HttpServletResponse response, FilterChain filterChain)
throws ServletException, IOException {
boolean doFilter = true;
// 使用 ContentCachingRequestWrapper 包装原始请求
RepeatableReadHttpServletRequestWrapper wrappedRequest = null;
if (this.duplicateRequestFilter) {
// 判断请求路径是否需要放行
boolean permit = false;
if (CollectionUtil.isNotEmpty(this.permitAll)) {
for (RequestMatcher matcher: this.permitAll) {
if (matcher.matches(request)) {
permit = true;
break;
}
}
}
if (!permit) {
if (request instanceof RepeatableReadHttpServletRequestWrapper) {
wrappedRequest = (RepeatableReadHttpServletRequestWrapper) request;
} else {
wrappedRequest = new RepeatableReadHttpServletRequestWrapper(request);
}
doFilter = this.isValid(wrappedRequest);
}
}
if (doFilter) {
// 继续处理请求
filterChain.doFilter(Objects.nonNull(wrappedRequest) ? wrappedRequest : request, response);
} else {
response.setContentType("application/json");
response.setStatus(WebEndpointResponse.STATUS_TOO_MANY_REQUESTS);
// response.setStatus(HttpServletResponse.SC_OK);
// ObjectMapper mapper = new ObjectMapper();
// mapper.writeValue(response.getOutputStream(), R.error(WebEndpointResponse.STATUS_TOO_MANY_REQUESTS, "重复的请求"));
}
}

/**
* 验证请求的有效性(判断是否重复请求)
* @param request
* @return
*/
private boolean isValid(RepeatableReadHttpServletRequestWrapper request) {
boolean valid = true;
// 缓存的key
String key = TokenUtil.getUidByToken() + "_" + request.getServletPath() + "_" + request.getParamsToJSONString();
// 获取之前的请求时间
Long previousRequestTime = requestCache.get(key);
if (previousRequestTime != null) {
// 如果距离上次请求时间很短(例如1秒),则拒绝当前请求
if (System.currentTimeMillis() - previousRequestTime < this.intervalTime) {
valid = false;
}
}
this.clearOldRequests();
// 缓存当前请求时间
requestCache.put(key, System.currentTimeMillis());
return valid;
}

// 用于清除缓存中的旧请求数据,防止缓存无限增长
private void clearOldRequests() {
requestCache.entrySet().removeIf(entry -> System.currentTimeMillis() - entry.getValue() > this.clearCachetime);
}

}

\n
# 配置 OAuth2 资源
\n

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
package xxx.config;


import xxx.AuthExceptionEntryPoint;
import xxx.CustomAccessDeniedHandler;
import xxx.DuplicateRequestFilter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.security.config.annotation.web.builders.HttpSecurity;
import org.springframework.security.oauth2.config.annotation.web.configuration.EnableResourceServer;
import org.springframework.security.oauth2.config.annotation.web.configuration.ResourceServerConfigurerAdapter;
import org.springframework.security.oauth2.config.annotation.web.configurers.ResourceServerSecurityConfigurer;
import org.springframework.security.oauth2.provider.token.TokenStore;
import org.springframework.security.web.authentication.preauth.AbstractPreAuthenticatedProcessingFilter;
import org.springframework.security.web.util.matcher.AntPathRequestMatcher;

import javax.servlet.Filter;
import java.util.Arrays;
import java.util.stream.Collectors;

@Configuration
@EnableResourceServer
public class ResourceServerConfig extends ResourceServerConfigurerAdapter {

Logger log = LoggerFactory.getLogger(ResourceServerConfig.class);

@Autowired
private TokenStore tokenStore;

/**
* 是否开放所有接口
*/
@Value("${http.security.permitAll:false}")
private Boolean isPermitAll;

/**
* 是否启用重复请求过滤
*/
@Value("${request.duplicateFilter.enabled:true}")
private Boolean duplicateRequestFilter;

/**
* 间隔时间(毫秒)
*/
@Value("${request.duplicateFilter.interval_time:1000}")
private Long intervalTime;

/**
* 清除缓存时间(毫秒)
*/
@Value("${request.duplicateFilter.clear_cache_time:30000}")
private Long clearCachetime;

/**
* 不需要验证权限的接口
*/
private String[] permitAll = new String[] {
"/auth/getVCode", "/auth/login"
};


/**
    * 通行规则
* @param http
* @throws Exception
*/
@Override
public void configure(HttpSecurity http) throws Exception {
HttpSecurity httpSecurity = http.csrf().disable();
if (isPermitAll) {
httpSecurity.authorizeRequests().antMatchers("/**").permitAll();
} else {
httpSecurity.authorizeRequests()
.antMatchers(permitAll).permitAll()
.antMatchers("/**").authenticated();
}
if (this.duplicateRequestFilter) {
httpSecurity.addFilterAfter(duplicateRequestFilter(), AbstractPreAuthenticatedProcessingFilter.class);
}
//让X-frame-options失效,去除iframe限制
http.headers().frameOptions().disable();
}

@Override
public void configure(ResourceServerSecurityConfigurer resources) throws Exception {
resources.tokenStore(tokenStore).authenticationEntryPoint(new AuthExceptionEntryPoint())
.accessDeniedHandler(new CustomAccessDeniedHandler());

}

@Bean
public Filter duplicateRequestFilter() {
return new DuplicateRequestFilter(this.duplicateRequestFilter, Arrays.asList(this.permitAll)
.stream().map(AntPathRequestMatcher::new).collect(Collectors.toList()), this.intervalTime,
this.clearCachetime);
}

}

\n", "tags": [ "工作", "解决问题", "java", "429" ] }, { "id": "https://hitoli.com/2024/03/03/Docker%E5%AE%B9%E5%99%A8%E7%AE%A1%E7%90%86%E5%B9%B3%E5%8F%B0-Portainer%E5%AE%89%E8%A3%85/", "url": "https://hitoli.com/2024/03/03/Docker%E5%AE%B9%E5%99%A8%E7%AE%A1%E7%90%86%E5%B9%B3%E5%8F%B0-Portainer%E5%AE%89%E8%A3%85/", "title": "Docker容器管理平台-Portainer安装", "date_published": "2024-03-03T07:52:00.000Z", "content_html": "

# 简介

\n

Portainer 是一款开源的容器管理平台,它提供了易于使用的 Web UI 界面,用于管理和监控容器及容器集群。该软件支持多种容器技术和配置,包括但不限于 Docker、Kubernetes 和 Swarm。

\n

# 部署

\n

1
2
3
4
5
#原版
docker run -d --restart=always --name="portainer" -p 9000:9000 -v /var/run/docker.sock:/var/run/docker.sock -v D:\\docker\\portainer\\data:/data portainer/portainer-ce

#汉化版
docker run -d --restart=always --name="portainer" -p 9000:9000 -v /var/run/docker.sock:/var/run/docker.sock -v D:\\docker\\portainer\\data:/data 6053537/portainer-ce

\n

# 登录

\n

http://localhost:9000/#!/home
\n\"\"

\n", "tags": [ "Windows", "工具", "Docker", "Portainer" ] }, { "id": "https://hitoli.com/2024/01/19/IntellIJ%E5%8F%AA%E7%BC%96%E8%AF%91%E6%89%93%E5%8C%85%E6%8C%87%E5%AE%9A%E7%9A%84%E6%A8%A1%E5%9D%97/", "url": "https://hitoli.com/2024/01/19/IntellIJ%E5%8F%AA%E7%BC%96%E8%AF%91%E6%89%93%E5%8C%85%E6%8C%87%E5%AE%9A%E7%9A%84%E6%A8%A1%E5%9D%97/", "title": "IntellIJ只编译打包指定的模块", "date_published": "2024-01-19T08:06:00.000Z", "content_html": "

# 新增 Maven 配置

\n

IntellIJ -》 右侧小锤子旁下拉按钮选择 Edit Configurations -》+ 号按钮 -》Maven

\n

\"\"

\n

\"\"

\n

\"\"

\n

# 填写 Maven 命令

\n

Working directory 选择根目录,命令是基于选中的目录为执行目录,Run 填写以下命令

\n

1
2
3
4
5
6
7
8
clean install -pl emergency-dzdz/dzdz-yzt -am package -f pom.xml

clean: 执行项目的清理操作,删除之前编译生成的文件。
install: 将项目构建的结果安装到本地Maven仓库中,以供其他项目使用。
-pl emergency-dzdz/dzdz-yzt: 指定要构建的模块。注意模块路径,此处是构建emergency-dzdz下的dzdz-yzt模块。
-am: 表示构建指定模块时,同时构建其依赖模块。这样可以确保依赖模块也被更新。
package: 打包项目,生成JAR或WAR文件。
因此,这个命令的作用是清理、编译、安装dzdz-yzt和依赖模块,并将它们的构建结果安装到本地Maven仓库中。

\n", "tags": [ "工作", "解决问题", "IntellIJ", "Maven" ] }, { "id": "https://hitoli.com/2024/01/03/%E8%A7%A3%E5%86%B3Nginx%E8%AE%BF%E9%97%AE%E8%87%AA%E7%AD%BEssl%E8%AF%81%E4%B9%A6%E6%8A%A5%E4%B8%8D%E5%AE%89%E5%85%A8%E5%91%8A%E8%AD%A6/", "url": "https://hitoli.com/2024/01/03/%E8%A7%A3%E5%86%B3Nginx%E8%AE%BF%E9%97%AE%E8%87%AA%E7%AD%BEssl%E8%AF%81%E4%B9%A6%E6%8A%A5%E4%B8%8D%E5%AE%89%E5%85%A8%E5%91%8A%E8%AD%A6/", "title": "解决Nginx访问自签ssl证书报不安全告警", "date_published": "2024-01-03T10:01:00.000Z", "content_html": "

# 生成根证书私钥和根证书

\n

1
2
3
4
openssl req -x509 -nodes -days 36500 -newkey rsa:2048 -subj "/C=国家/ST=省/L=市/O=机构" -keyout CA-private.key -out CA-certificate.crt -reqexts v3_req -extensions v3_ca

#示例
openssl req -x509 -nodes -days 36500 -newkey rsa:2048 -subj "/C=CN/ST=EZ/L=EZ/O=EZ" -keyout CA-private.key -out CA-certificate.crt -reqexts v3_req -extensions v3_ca

\n

# 生成自签名证书私钥

\n

1
openssl genrsa -out private.key 2048

\n

# 根据自签名证书私钥生成自签名证书申请文件

\n

1
openssl req -new -key private.key -subj "/C=CN/ST=EZ/L=EZ/O=EZ/CN=192.168.2.117" -sha256 -out private.csr

\n

# 定义自签名证书扩展文件 (解决 chrome 安全告警),新建 private.ext 文件并写入以下内容(IP 为 nginx 服务器 ip,同 nginx.conf 中的 server_name)

\n

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
[ req ]
default_bits = 1024
distinguished_name = req_distinguished_name
req_extensions = SAN
extensions = SAN
[ req_distinguished_name ]
countryName = CN
stateOrProvinceName = Definesys
localityName = Definesys
organizationName = Definesys
[SAN]
authorityKeyIdentifier=keyid,issuer
basicConstraints=CA:FALSE
keyUsage = digitalSignature, nonRepudiation, keyEncipherment, dataEncipherment
subjectAltName = IP:192.168.2.117

\n

# 生成自签名证书(有效期 100 年)

\n

1
openssl x509 -req -days 36500 -in private.csr -CA CA-certificate.crt -CAkey CA-private.key -CAcreateserial -sha256 -out private.crt -extfile private.ext -extensions SAN

\n

# nginx 的 ssl 证书配置

\n

1
2
ssl_certificate_key  /usr/local/nginx/ssl/private.key;
ssl_certificate /usr/local/nginx/ssl/private.crt;

\n

# 证书安装

\n

需要安装 CA-certificate.crt 到受信任的根证书颁发机构下,即可从浏览器正常访问且不会报不安全警告。

\n

1
2
3
4
5
6
7
8
9
#ssl测试
openssl s_client -connect localhost:8080
#检查证书格式
openssl x509 -in private.crt -text -noout
openssl rsa -in private.key -check
#检查证书是否过期(确保 "notBefore" 小于当前日期,"notAfter" 大于当前日期)
openssl x509 -in private.crt -noout -dates
#查看证书链
openssl x509 -in private.crt -noout -issuer -subject

\n", "tags": [ "工作", "解决问题", "Nginx", "https", "SSL", "证书" ] }, { "id": "https://hitoli.com/2024/01/03/Centos7%E7%BC%96%E8%AF%91%E5%8D%87%E7%BA%A7nginx/", "url": "https://hitoli.com/2024/01/03/Centos7%E7%BC%96%E8%AF%91%E5%8D%87%E7%BA%A7nginx/", "title": "Centos7编译升级nginx", "date_published": "2024-01-03T09:40:00.000Z", "content_html": "

# 配置

\n

./configure
\n# 安装目录
\n --prefix=/usr/local/nginx
\n#nginx 运行时的非特权用户
\n --user=nginx
\n#nginx 运行时的非特权用户组
\n --group=nginx
\n#nginx 运行时 pid 的目录
\n --pid-path=/var/run/nginx/nginx.pid
\n# 锁定文件目录,防止误操作,或其他使用
\n --lock-path=/var/lock/nginx.lock
\n#nginx 错误日志目录
\n --error-log-path=/var/log/nginx/error.log
\n#nginx 运行日志目录
\n --http-log-path=/var/log/nginx/access.log
\n# 开启 gz 模块,压缩静态页面
\n --with-http_gzip_static_module
\n--with-http_gunzip_module
\n# 开启 ssl 模块
\n --with-http_ssl_module
\n# 开启 http2 模块
\n --with-http_v2_module
\n#openssl 目录
\n --with-openssl=/home/openssl-3.2.0
\n#nginx 的客户端状态
\n --with-http_stub_status_module
\n--with-http_realip_module
\n# 设定客户端请求的临时目录
\n --http-client-body-temp-path=/usr/local/nginx/client
\n# 设定 http 代理临时目录
\n --http-proxy-temp-path=/usr/local/nginx/proxy
\n# 设定 fastcgi 临时目录
\n --http-fastcgi-temp-path=/usr/local/nginx/fastcgi
\n# 设定 uwsgi 临时目录
\n --http-uwsgi-temp-path=/usr/local/nginx/uwsgi
\n# 设定 scgi 临时目录
\n --http-scgi-temp-path=/usr/local/nginx/scgi
\n

1
./configure --prefix=/usr/local/nginx --with-http_stub_status_module --with-http_ssl_module --with-http_v2_module  --with-openssl=/home/openssl-3.2.0

\n

# 编译

\n

1
make(不要make install)

\n

# 备份

\n

1
cp /usr/local/nginx/sbin/nginx /usr/local/nginx/sbin/nginx.old

\n

# 更新

\n

1
2
3
4
5
6
#关闭nginx
nginx -s stop
#更新nginx
cp /root/nginx-1.24.0/objs/nginx /usr/local/nginx/sbin/
#启动nginx
nginx

\n", "tags": [ "工作", "解决问题", "Nginx", "CentOS" ] }, { "id": "https://hitoli.com/2023/12/24/Docker-desktop%E9%83%A8%E7%BD%B2nacos/", "url": "https://hitoli.com/2023/12/24/Docker-desktop%E9%83%A8%E7%BD%B2nacos/", "title": "Docker desktop部署nacos", "date_published": "2023-12-24T13:37:00.000Z", "content_html": "

# 创建数据库

\n

1
create database nacos

\n

# 下载初始化脚本

\n

脚本文件

\n

# 以普通模式启动获取数据

\n

1
docker run -d --restart=always --name="nacos" -e MODE=standalone -p 8848:8848 -p 9848:9848 nacos/nacos-server:latest

\n

# 进入容器内修改文件权限

\n

1
2
3
chmod 777 /home/nacos/conf
chmod 777 /home/nacos/data
chmod 777 /home/nacos/logs

\n

# 拷贝文件到本地

\n

1
2
3
docker cp nacos:/home/nacos/conf D:\\docker\\nacos\\data\\
docker cp nacos:/home/nacos/data D:\\docker\\nacos\\data\\
docker cp nacos:/home/nacos/logs D:\\docker\\nacos\\data\\

\n

# 创建正式容器

\n

1
docker run -d --name nacos --restart=always --network my-net -p 8848:8848 -p 9848:9848 -p 9849:9849 -e MODE=standalone --privileged=true -e SPRING_DATASOURCE_PLATFORM=mysql -e MYSQL_SERVICE_HOST=mysql地址 -e MYSQL_SERVICE_PORT=mysql端口 -e MYSQL_SERVICE_USER=mysql用户名 -e MYSQL_SERVICE_PASSWORD=mysql密码 -e MYSQL_SERVICE_DB_NAME=nacos -e TIME_ZONE='Asia/Shanghai' -v D:\\docker\\nacos\\data\\logs:/home/nacos/logs -v D:\\docker\\nacos\\data\\data:/home/nacos/data -v D:\\docker\\nacos\\data\\conf:/home/nacos/conf nacos/nacos-server:latest

\n", "tags": [ "Windows", "工具", "docker", "nacos" ] }, { "id": "https://hitoli.com/2023/12/02/fastjson%E5%BA%8F%E5%88%97%E5%8C%96%E5%8E%BB%E9%99%A4%E7%A9%BA%E5%AD%97%E7%AC%A6%E4%B8%B2/", "url": "https://hitoli.com/2023/12/02/fastjson%E5%BA%8F%E5%88%97%E5%8C%96%E5%8E%BB%E9%99%A4%E7%A9%BA%E5%AD%97%E7%AC%A6%E4%B8%B2/", "title": "fastjson序列化去除空字符串属性", "date_published": "2023-12-02T13:47:00.000Z", "content_html": "

今天在把对象转为 json 时需要去除 key 或者 value 为 null 或空字符串的属性,特此记录一下后续好复用。
\n

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
public static String toJSONString(Object object) {
SerializerFeature[] serializerFeatures = new SerializerFeature[] {
//格式化时间
SerializerFeature.WriteDateUseDateFormat
};
return JSON.toJSONString(object, new ValueFilter() {
@Override
public Object process(Object object, String name, Object value) {
               // 如果名称或者值为null或空字符串,则不序列化该属性
if (name == null || (name instanceof String && ((String) name).isEmpty()) ||
value == null || (value instanceof String && ((String) value).isEmpty())) {
return null;
}
return value;
}
}, serializerFeatures);
}

\n", "tags": [ "工作", "解决问题", "fastjson" ] }, { "id": "https://hitoli.com/2023/11/03/Linux%E4%B8%8B%E5%BF%AB%E9%80%9F%E9%83%A8%E7%BD%B2SpringBoot%E9%A1%B9%E7%9B%AE%E7%9A%84%E8%84%9A%E6%9C%AC/", "url": "https://hitoli.com/2023/11/03/Linux%E4%B8%8B%E5%BF%AB%E9%80%9F%E9%83%A8%E7%BD%B2SpringBoot%E9%A1%B9%E7%9B%AE%E7%9A%84%E8%84%9A%E6%9C%AC/", "title": "Linux下快速部署SpringBoot项目的脚本", "date_published": "2023-11-03T13:35:00.000Z", "content_html": "

# Linux 部署脚本

\n

只需要把 jar 和 yml 跟脚本放在同一目录下即可快速启动。

\n
\n

拷贝以下代码放入 txt 文本,然后改为 start.sh

\n
\n

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
#!/bin/bash

export CLOUD_HOME=`pwd`

# 获取当前目录中的第一个JAR文件的名称
jar_file=$(find . -maxdepth 1 -type f -name "*.jar" | head -n 1)

if [ -n "$jar_file" ]; then
jar_file=${jar_file#./}
#echo "JAR文件的名称是: $jar_file"
jar_file_name=$(basename "$jar_file" .jar)
else
echo "当前目录没有JAR文件."
exit
fi

# 获取当前目录中的第一个yml文件的名称
yml_file=$(find . -maxdepth 1 -type f -name "*.yml" | head -n 1)

if [ -n "$yml_file" ]; then
yml_file=${yml_file#./}
#echo "YML文件的名称是: $yml_file"
else
echo "当前目录中没有YML文件."
fi

pids=$(ps -ef | grep java | grep $jar_file_name | grep -v grep | awk '{print $2}')

for pid in $pids; do
echo "$jar_file_name is running, pid="$pid
exit 0
done

echo "$jar_file_name is pedding..."

sleep 3

JAVA_OPTS="-Djava.security.egd=file:/dev/./urandom -Dfile.encoding=UTF8"
JAVA_OPTS="$JAVA_OPTS -Dsun.jnu.encoding=UTF8 -Xms512m -Xmx1024m"
JAVA_OPTS="$JAVA_OPTS -Dpid.path=$CLOUD_HOME/temp -Dspring.config.additional-location=$CLOUD_HOME/$yml_file"
JAVA_OPTS="$JAVA_OPTS -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=5007"

nohup java $JAVA_OPTS -jar $CLOUD_HOME/$jar_file >/dev/null 2> $CLOUD_HOME/$jar_file_name.run &
#nohup java $JAVA_OPTS -jar $CLOUD_HOME/$jar_file > $CLOUD_HOME/$jar_file_name.run 2>&1 &

echo "$jar_file_name started."

\n
\n

拷贝以下代码放入 txt 文本,然后改为 stop.sh

\n
\n

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
#!/bin/bash

export CLOUD_HOME=`pwd`

# 获取当前目录中的第一个JAR文件的名称
jar_file=$(find . -maxdepth 1 -type f -name "*.jar" | head -n 1)

if [ -n "$jar_file" ]; then
jar_file=${jar_file#./}
#echo "JAR文件的名称是: $jar_file"
jar_file_name=$(basename "$jar_file" .jar)
else
echo "当前目录没有JAR文件."
exit
fi

# 获取当前目录中的第一个yml文件的名称
yml_file=$(find . -maxdepth 1 -type f -name "*.yml" | head -n 1)

if [ -n "$yml_file" ]; then
yml_file=${yml_file#./}
#echo "YML文件的名称是: $yml_file"
else
echo "当前目录中没有YML文件."
fi

pids=$(ps -ef | grep java | grep $jar_file_name | grep -v grep | awk '{print $2}')

for pid in $pids; do
kill -9 $pid
done

echo "$jar_file_name is stopping..."

sleep 5

echo "$jar_file_name stopped."

\n", "tags": [ "Linux", "Shell", "Linux", "SpringBoot", "快速部署", "bash" ] }, { "id": "https://hitoli.com/2023/10/29/%E7%BB%99%E6%88%91%E7%9A%84%E8%80%81%E7%AC%94%E8%AE%B0%E6%9C%AC%E6%B8%85%E7%90%86%E7%81%B0%E5%B0%98/", "url": "https://hitoli.com/2023/10/29/%E7%BB%99%E6%88%91%E7%9A%84%E8%80%81%E7%AC%94%E8%AE%B0%E6%9C%AC%E6%B8%85%E7%90%86%E7%81%B0%E5%B0%98/", "title": "给我的老笔记本清理灰尘", "date_published": "2023-10-29T12:41:00.000Z", "content_html": "

今天闲着无事就把我的老笔记本拆了,清理了一下灰尘。笔记本已经 10 多年了,中间加过内存,换过固态硬盘。清理一下还能发挥它的余热!
\n\"\"
\n\"\"
\n\"\"
\n\"\"
\n\"\"
\n\"\"
\n\"\"

\n", "tags": [ "生活", "日常记录", "笔记本", "DELL" ] }, { "id": "https://hitoli.com/2023/10/28/Windows%E4%B8%8B%E5%BF%AB%E9%80%9F%E9%83%A8%E7%BD%B2SpringBoot%E9%A1%B9%E7%9B%AE%E7%9A%84%E6%89%B9%E5%A4%84%E7%90%86/", "url": "https://hitoli.com/2023/10/28/Windows%E4%B8%8B%E5%BF%AB%E9%80%9F%E9%83%A8%E7%BD%B2SpringBoot%E9%A1%B9%E7%9B%AE%E7%9A%84%E6%89%B9%E5%A4%84%E7%90%86/", "title": "Windows下快速部署SpringBoot项目的批处理", "date_published": "2023-10-28T11:56:00.000Z", "content_html": "

# Windows 部署脚本

\n

只需要把 jar 和 yml 跟批处理放在同一目录下即可点击快速启动。启动后再次点击会关闭上次启动的窗口并重新启动。

\n
\n

拷贝以下代码放入 txt 文本,然后改为 start.bat

\n
\n

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
@ECHO OFF
setlocal enabledelayedexpansion

REM 关闭上次进程
SET "pidFile=pid.txt"
if exist "%pidFile%" (
\tfor /f "usebackq" %%a in ("pid.txt") do (
\t\tset PID=%%a
\t)
\tif not "!PID!"=="" (
\t\ttaskkill /F /T /PID !pid!
\t\tdel pid.txt
\t)
)

REM 存储当前进程
for /f %%i in ('wmic process where "name='cmd.exe' and CommandLine like '%%<scriptname>.bat%%'" get ParentProcessId ^| findstr /r "[0-9]"') do set pid=%%i
echo %PID% > pid.txt

REM 设置title
for /f "tokens=2" %%i in ('chcp') do set codepage=%%i
chcp 65001 > nul
title 我的SpringBoot项目
chcp %codepage% > nul

cd %~dp0

REM 获取jar
set "jarFile="
for %%i in (*.jar) do (
if not defined jarFile (
set "jarFile=%%i"
)
)

if not defined jarFile (
echo not find jar
pause
exit
)

SET JAVA_OPTS=-Djava.security.egd=file:/dev/./urandom -Dfile.encoding=UTF-8
set JAVA_OPTS=%JAVA_OPTS% -Dsun.jnu.encoding=UTF8 -Xms512m -Xmx1024m
set JAVA_OPTS=%JAVA_OPTS% -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=5007
set JAVA_OPTS=%JAVA_OPTS% -Dpid.path=./temp

REM 获取yml
set "ymlFile="
for %%i in (*.yml) do (
if not defined ymlFile (
set "ymlFile=%%i"
)
)

if defined ymlFile (
\tset JAVA_OPTS=%JAVA_OPTS% -Dspring.config.additional-location=!ymlFile!
) else (
\techo not find yml
)

REM 启动服务
java %JAVA_OPTS% -jar !jarFile!
pause

\n
\n

拷贝以下代码放入 txt 文本,然后改为 stop.bat

\n
\n

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
@ECHO OFF
setlocal enabledelayedexpansion

REM 关闭上次进程
SET "pidFile=pid.txt"
if exist "%pidFile%" (
\tfor /f "usebackq" %%a in ("pid.txt") do (
\t\tset PID=%%a
\t)
\tif not "!PID!"=="" (
\t\ttaskkill /F /T /PID !pid!
\t\tdel pid.txt
\t)
)

exit

\n", "tags": [ "Windows", "工具", "SpringBoot", "Bat", "批处理", "快速部署" ] }, { "id": "https://hitoli.com/2023/10/28/Centos%E6%8C%82%E8%BD%BD%E6%96%B0%E7%A1%AC%E7%9B%98/", "url": "https://hitoli.com/2023/10/28/Centos%E6%8C%82%E8%BD%BD%E6%96%B0%E7%A1%AC%E7%9B%98/", "title": "Centos挂载新硬盘", "date_published": "2023-10-28T11:26:00.000Z", "content_html": "

# 查看磁盘信息

\n

1
fdisk -l 查看当前磁盘的分区情况

\n\"\"
\n 可从图中获取以下信息:
\n/dev/vdb 数据盘容量为 60GB,包含 MBR 分区 /dev/vdb1,容量为 50GB。
\n/dev/vdc 数据盘容量为 60GB,包含 GPT 分区 /dev/vdc1,容量为 50GB。

\n

1
df -TH 分区的文件系统类型

\n\"\"
\n 可从图中获取以下信息:
\n/dev/vdb1 文件系统类型为 ext4,已挂载至 /mnt/disk1。
\n/dev/vdc1 文件系统类型为 xfs,已挂载至 /mnt/disk2。
\n
1
fdisk /dev/vdb 查看新磁盘情况

\n\"\"
\n
1
lsblk 查看分区情况

\n\"\"

\n

# 挂载新硬盘

\n

1
mkfs.ext4 /dev/vdb 格式化磁盘

\n\"\"
\n
1
2
3
cd /mnt
mkdir data 新建挂载点
mount /dev/vdb /mnt/data 挂载

\n
1
df -h 查看挂载情况

\n\"\"
\n 查看 UUID 有三种方式:
\n
1
blkid

\n\"\"
\n
1
lsblk -f

\n\"\"
\n
1
ll /dev/disk/by-uuid/

\n\"\"
\n
1
2
3
4
设置自动挂载:
echo "UUID=c8ac09ca-fd4d-4511-bd2c-4fdf96f08168 /data ext4 defaults 0 0" >> /etc/fstab
自动挂载/etc/fstab里面的东西
mount -a

\n

# 临时卸载

\n

1
umount /dev/vdb 重启机器之后又恢复到挂载状态

\n

# 永久卸载

\n

1
vim /etc/fstab 把添加的磁盘信息删除即可。

\n", "tags": [ "Linux", "Centos", "Centos", "Linux", "Mount" ] }, { "id": "https://hitoli.com/2023/09/09/squid-stunnel-%E7%A7%91%E5%AD%A6%E4%B8%8A%E7%BD%91/", "url": "https://hitoli.com/2023/09/09/squid-stunnel-%E7%A7%91%E5%AD%A6%E4%B8%8A%E7%BD%91/", "title": "squid+stunnel 科学上网", "date_published": "2023-09-09T08:48:00.000Z", "content_html": "

# 前言

\n

科学上网的方法有多种,有很多第三方提供的免费方案,这些方案优缺点暂时不予讨论。实际工作生活中还是会有需要自己搭建的情况,这次介绍的是使用 squid+stunnel 方案进行搭建。

\n

# 准备

\n

一台可以访问外网的服务器,如香港的云主机并安装 Ubuntu 系统。

\n
# squid 部分
\n\n

1
apt-get install -y squid

\n\n
\n

生成用户文件

\n
\n

1
2
apt-get install apache2-utils
htpasswd -c /etc/squid/squid_user.txt 用户名

\n
\n

修改 squid 配置
\n 1、直接修改 /etc/squid/squid.conf 文件
\n 2、修改 /etc/squid/conf.d/debian.conf 文件
\n两种方式都一样,在底部加入以下代码

\n
\n

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
#dns服务器地址
dns_nameservers 8.8.8.8 8.8.4.4
dns_v4_first on
# 监听端口
http_port 3128
# 定义squid密码文件与ncsa_auth文件位置
auth_param basic program /usr/lib/squid/basic_ncsa_auth /etc/squid/squid_user.txt
# 认证进程的数量
auth_param basic children 15
# 认证对话框显示提示信息
auth_param basic realm Squid proxy-caching web server
# 认证有效期
auth_param basic credentialsttl 24 hours
# 是否区分用户名大小写,off为不区分
auth_param basic casesensitive off
# 对定义的squid_user文件内的用户开启认证访问
acl 用户名 proxy_auth REQUIRED
# 允许squid_user文件内用户进行代理
http_access allow 用户名
# 顺序匹配,最后添加拒绝所有未允许的规则。不添加会发现,未匹配到的规则会被放行
http_access deny all
# 缓存设置
cache_dir ufs /var/spool/squid 100 16 256 read-only
cache_mem 0 MB
coredump_dir /var/spool/squid
# 配置高匿,不允许设置任何多余头信息,保持原请求header。
header_access Via deny all
header_access X-Forwarded-For deny all
header_access Server deny all
header_access X-Cache deny all
header_access X-Cache-Lookup deny all
forwarded_for off
via off
# logs相关配置
emulate_httpd_log on
logformat squid %{X-Forwarded-For}>h %ui %un [%tl] "%rm %ru HTTP/%rv" %Hs %<st "%{Referer}>h" "%{User-Agent}>h" %Ss:%Sh
access_log /var/log/squid/access.log squid
cache_log /var/log/squid/cache.log
cache_store_log /var/log/squid/store.log
logfile_rotate 20

\n
\n

至此已经可以通过填写安装 squid 的服务器 ip 加端口 3128 加用户名密码进行代理访问了(通过访问 https://www.ip.cn/ 查看 ip 就会发现自己的出口 ip 已经变成了 squid 服务器的 ip 了)。但是要想科学上网还必须对代理的数据进行加密,否则访问外网还是会被我国的长城防火墙阻挡,所以还需要安装 stunnel 来实现此目的。

\n
\n
# stunnel 服务端部分
\n\n

1
apt-get install -y stunnel

\n\n

1
openssl req -new -x509 -days 3650 -nodes -out stunnel.pem -keyout stunnel.pem

\n\n

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
; 设置stunnel的pid文件路径
pid = /etc/stunnel/stunnel.pid
; 设置stunnel工作的用户(组)
setuid = root
setgid = root

; 开启日志等级:emerg (0), alert (1), crit (2), err (3), warning (4), notice (5), info (6), or debug (7)
debug = 7
; 日志文件路径
output = /etc/stunnel/stunnel.log

; 证书文件
cert = /etc/stunnel/stunnel.pem
; 私钥文件
key = /etc/stunnel/stunnel.pem

; 自定义服务名squid-proxy
[squid-proxy]
; 服务监听的端口,client要连接这个端口与server通信
accept = 1234(自定义)
; 服务要连接的端口,连接到squid的3128端口,将数据发给squid
connect = 3128

\n
# stunnel 客户端部分
\n
\n

可以安装在要代理的机器上,在需要代理的情况下再开启(代理地址填 127.0.0.1 加客户端监听端口)。也可以安装在国内的服务器上一直保持连接(代理信息填国内服务器 ip 加客户端监听端口)。本示例客户端为 windows 系统

\n
\n\n

https://www.stunnel.org/downloads.html

\n\n

1
2
3
4
5
6
7
8
9
10
11
[squid-proxy]
client = yes
; 监听3128端口,那么用户浏览器的代理设置就是 stunnel-client-ip:3128
accept = 3128
; 要连接到的stunnel server的ip与端口
connect = stunnel服务端ip:1234(服务端自定义端口)

; 需要验证对方发过来的证书
verify = 2
; 用来进行证书验证的文件(stunnel服务端生成的证书复制到以下目录并改名为stunnel-server.pem)
CAfile = C:\\Program Files (x86)\\stunnel\\config\\stunnel-server.pem

\n
\n

至此配置好代理 ip 为 stunnel 客户端 ip 加端口 3128 就可以正式科学上网了。如果只想对需要科学的 url 进行代理,可以通过安装 Proxy SwitchyOmega 插件实现(规则地址可通过 https://github.com/gfwlist/gfwlist 获取)。

\n
\n

\"\"
\n\"\"

\n", "tags": [ "生活", "技术分享", "squid", "stunnel", "科学上网", "代理" ] }, { "id": "https://hitoli.com/2023/09/09/Windows-11%E5%8F%B3%E9%94%AE%E8%8F%9C%E5%8D%95%E6%81%A2%E5%A4%8D%E8%80%81%E7%89%88%E6%9C%AC/", "url": "https://hitoli.com/2023/09/09/Windows-11%E5%8F%B3%E9%94%AE%E8%8F%9C%E5%8D%95%E6%81%A2%E5%A4%8D%E8%80%81%E7%89%88%E6%9C%AC/", "title": "Windows 11右键菜单恢复老版本", "date_published": "2023-09-09T08:38:00.000Z", "content_html": "

# 恢复方法

\n

1、按【Win+X】

\n

2、选择【终端管理员】

\n

3、输入以下命令并回车:
\n reg add "HKCU\\Software\\Classes\\CLSID\\{86ca1aa0-34aa-4e8b-a509-50c905bae2a2}\\InprocServer32" /f /ve

\n

4、重启电脑

\n", "tags": [ "Windows", "系统优化", "Windows 11" ] }, { "id": "https://hitoli.com/2023/07/08/%E8%A7%A3%E5%86%B3Lombok%E6%8A%A5%E9%94%99/", "url": "https://hitoli.com/2023/07/08/%E8%A7%A3%E5%86%B3Lombok%E6%8A%A5%E9%94%99/", "title": "解决Lombok报错", "date_published": "2023-07-08T02:51:00.000Z", "content_html": "

# 问题描述

\n
    \n
  1. 报错详情
  2. \n
\n
\n

java: You aren’t using a compiler supported by lombok, so lombok will not work and has been disabled.
\nYour processor is: com.sun.proxy.$Proxy26
\nLombok supports: OpenJDK javac, ECJ

\n
\n

\"\"

\n
    \n
  1. \n

    问题分析
    \n属于 lombok 编译不通过,原因可能是因为依赖没有更到最新版本

    \n
  2. \n
  3. \n

    解决办法
    \n在 IntelliJ IDEA 的全局配置 Compiler 中添加如下配置:
    \n

    1
    -Djps.track.ap.dependencies=false

    \n\"\"

    \n
  4. \n
\n", "tags": [ "工作", "解决问题", "Lombok", "IntelliJ", "IDEA" ] }, { "id": "https://hitoli.com/2023/07/01/shoka%E4%B8%BB%E9%A2%98%E9%80%9F%E5%BA%A6%E4%BC%98%E5%8C%96-%E6%8B%86%E5%88%86jsdelivr/", "url": "https://hitoli.com/2023/07/01/shoka%E4%B8%BB%E9%A2%98%E9%80%9F%E5%BA%A6%E4%BC%98%E5%8C%96-%E6%8B%86%E5%88%86jsdelivr/", "title": "shoka主题速度优化-拆分jsdelivr", "date_published": "2023-06-30T17:15:00.000Z", "content_html": "

# 前言

\n
\n

该篇文章介绍了如何对 shoka 主题进行 jsdelivr 聚合拆分,以便使用国内镜像源和异步加载,从而优化网站速度。具体操作包括更改模板、注册 helper 和更改配置。其中,推荐使用 advVendors 配置,可自定义加载源和 js 文件名,同时支持异步加载、pjax 刷新和 integrity 防 XSS 等特性。

\n
\n

众所周知,jsdelivr 在国内的速度可以用慢的一批来形容而 shoka 主题使用了 jsdelivr 的 combine 功能加载第三方 js, 而 combine 在国内没有镜像源并且阻断了使用 CDN 并发加速的道路,本篇博文会将 jsdelivr 聚合拆分为几个独立的 js, 以便使用国内镜像源和异步加载。

\n
\n

此方案相较于本地化而言有较大速度优势,尤其在 CDN 并发加持下

\n
\n

# 拆分 jsdelivr

\n
    \n
  1. 更改模板
  2. \n
\n

打开 shoka\\layout\\_partials\\layout.njk,找到第 144 行左右:

\n

1
2
3
4
<script src="https://cdn.polyfill.io/v3/polyfill.js"></script>
&#123;&#123; _vendor_js() &#125;&#125;
&#123;&#123; _js('app.js') &#125;&#125;
&#123;&#123; partial('_partials/third-party/baidu-analytics.njk', &#123;&#125;, &#123;cache: true&#125;) &#125;&#125;

\n

更改为如下内容:

\n

1
2
3
4
5
6
7
8
9
10
<script src="https://cdn.polyfill.io/v3/polyfill.js"></script>
&#123;%- if theme.advVendors.enable %&#125;
\t&#123;% for i in _list_vendor_js() %&#125;
\t\t&#123;&#123; _adv_vendor_js(i) &#125;&#125;
\t&#123;% endfor %&#125;
&#123;%- else %&#125;
&#123;&#123; _vendor_js() &#125;&#125;
&#123;%- endif %&#125;
&#123;&#123; _js('app.js')&#125;&#125;
&#123;&#123; partial('_partials/third-party/baidu-analytics.njk', &#123;&#125;, &#123;cache: true&#125;) &#125;&#125;

\n
    \n
  1. 注册 helper
  2. \n
\n

打开 shoka\\scripts\\helpers\\asset.js, 最后一行新建空行,增加如下内容:

\n

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
hexo.extend.helper.register('_list_vendor_js', () => {
return hexo.theme.config.vendorsList.js;
});

hexo.extend.helper.register('_adv_vendor_js', function (js_name) {
const config = hexo.theme.config.advVendors.js[js_name];
const src = config["src"];
let result;
if (src.indexOf("http") !== -1) {
result = src;
} else if (src.indexOf("combine") !== -1) {
console.log("The combine feature is not recommended!")
result = hexo.theme.config.advVendors.combine + src;
} else if (src.indexOf("npm") !== -1) {
result = hexo.theme.config.advVendors.npm + src.slice(4);
} else if (src.indexOf("gh") !== -1) {
result = hexo.theme.config.advVendors.github + src.slice(3);
} else {
result = "/" + src;
}
let attr = {src: result};
if (config["async"]) attr["async"] = "async";
if (config["data-pjax"]) attr["data-pjax"] = "data-pjax";
if (config["hash-value"]) attr["integrity"]=config["hash-value"];
if (config["deferLoad"]) {
return htmlTag('script', {"data-pjax": true}, `
const script=document.createElement("script");script.src="${result}",script.async=true,document.body.appendChild(script)
`)
}
return htmlTag('script', attr, '');
})

\n
    \n
  1. 更改配置
  2. \n
\n

在 shoka 目录下 _config.yml 增加如下内容:

\n
\n

推荐内容,可根据自己情况更改

\n
\n

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
advVendors:
enable: true
github: "https://cdn.jsdelivr.net/gh/"
combine: "https://cdn.jsdelivr.net/"
npm: "https://unpkg.com/"
js:
pace:
src: https://lf9-cdn-tos.bytecdntp.com/cdn/expire-1-M/pace/1.0.2/pace.min.js
pjax:
src: https://lib.baomitu.com/pjax/0.2.8/pjax.min.js
fetch:
src: npm/whatwg-fetch@3.4.0/dist/fetch.umd.js
anime:
src: https://lf9-cdn-tos.bytecdntp.com/cdn/expire-1-M/animejs/3.2.0/anime.min.js
algolia:
src: https://lf9-cdn-tos.bytecdntp.com/cdn/expire-1-M/algoliasearch/4.12.1/algoliasearch-lite.umd.min.js
instantsearch:
src: https://lf9-cdn-tos.bytecdntp.com/cdn/expire-1-M/instantsearch.js/4.39.0/instantsearch.production.min.js
lazyload:
src: https://lf9-cdn-tos.bytecdntp.com/cdn/expire-1-M/lozad.js/1.16.0/lozad.min.js
quicklink:
src: https://lf9-cdn-tos.bytecdntp.com/cdn/expire-1-M/quicklink/2.2.0/quicklink.umd.min.js
fancybox:
src: https://lf26-cdn-tos.bytecdntp.com/cdn/expire-1-M/??jquery/3.5.1/jquery.min.js,fancybox/3.5.7/jquery.fancybox.min.js,justifiedGallery/3.8.1/js/jquery.justifiedGallery.min.js
async: true
valine:
src: gh/amehime/MiniValine@4.2.2-beta10/dist/MiniValine.min.js
copy_tex:
src: https://lf9-cdn-tos.bytecdntp.com/cdn/expire-1-M/KaTeX/0.12.0/contrib/copy-tex.min.js
async: true
chart:
src: npm/frappe-charts@1.5.0/dist/frappe-charts.min.iife.js

vendorsList:
js:
- pace
- pjax
- fetch
- anime
- algolia
- instantsearch
- lazyload
- quicklink
- fancybox
- valine
- copy_tex
- chart

\n

下面为结构详解:

\n

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
advVendors:
\tenable: true #是否开启,关闭使用主题默认加载
\tgithub: #github 使用的加载源,需要协议头和末尾斜杠
\tcombine: #聚合 js 使用的加载源 (不建议使用)
\tnpm: #npm 的加载源
\tjs:
\t\tjspackage: #js 名,可以与文件名不一致
\t\t\tsrc: "资源地址,详情见后面"
\t\t\t# async: true 异步加载此 js
\t\t\t# data-pjax: true 在 pjax 加载时刷新此 js
\t\t\t# hash-value: 这个资源的 integrity 值,用于防 XSS
\t\t\t# deferLoad: true 使用动态 DOM 节点添加延迟 js 加载 (实验性)

vendorsList:
js:
\t- jspackage #与上方 jspackage 一致即可

\n

资源地址格式如下:

\n

https://example.com/xxx.js 使用 http (s) 地址加载 js
\ncombine/xxx.js,xxx.js 使用 jsdelivr 的 combine 功能加载 (不推荐)
\nnpm/xxx/xxx.js 使用 npm 源加载 js
\ngh/xxx/xxx.js 使用 gh 源加载 js
\nxxx.js 从本地加载 js
\n 优先级如下:
\nhttp>combine>npm>gh > 本地

\n", "tags": [ "极空间", "Docker", "Hexo", "Shoka", "jsdelivr" ] }, { "id": "https://hitoli.com/2023/05/30/http%E8%AF%B7%E6%B1%82%E4%B9%8BrestTemplate%E9%85%8D%E7%BD%AE%E8%B6%85%E6%97%B6%E6%97%B6%E9%97%B4/", "url": "https://hitoli.com/2023/05/30/http%E8%AF%B7%E6%B1%82%E4%B9%8BrestTemplate%E9%85%8D%E7%BD%AE%E8%B6%85%E6%97%B6%E6%97%B6%E9%97%B4/", "title": "http请求之restTemplate配置超时时间", "date_published": "2023-05-30T09:20:00.000Z", "content_html": "

# 问题

\n

http 请求发起后接收不到返回数据!!!【测试环境没出问题,发到正式环境就有问题】

\n

项目中通过 restTemplate 发起请求:
\n

1
2
3
4
5
6
7
8
log.info("请求入参:{}",JSON.toJSONString(request));//打印日志1
// 配置http请求的连接超时时间和读取超时时间
HttpsClientRequestFactory factory = new HttpsClientRequestFactory();
factory.setConnectTimeout(60 * 1000);
factory.setReadTimeout(5 * 60 * 1000);
RestTemplate restTemplate = new RestTemplate(factory);
Result<InventoryResult> result = restTemplate.postForObject(address.concat(inventoryUrl), request, Result.class);
log.info("库存同步,返回数据: {}", result);//打印日志2

\n\n

http 请求入参:{data=[{ productStatus=10,skuCode=null}], messageId=ewpfpr1t6ey5r6qj0su0w1h6rt73hr,token=vgvU5EJKuZbuHii7WH6pTINp40ZRicaqLz4dq5P7L6pDzWir8EEGZhCKPucQjljsw69EHasEy+iJfdTofDg==}

\n\n

# 解决方式

\n
# 第一种
\n
    \n
  1. \n

    添加 HttpsClientRequestFactory 类,并继承 SimpleClientHttpRequestFactory
    \n

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    15
    16
    17
    18
    19
    20
    21
    22
    23
    24
    25
    26
    27
    28
    29
    30
    31
    32
    33
    34
    35
    36
    37
    38
    39
    40
    41
    42
    43
    44
    45
    46
    47
    48
    49
    50
    51
    52
    53
    54
    55
    56
    57
    /**
    * 兼容调Https接口
    */
    public class HttpsClientRequestFactory extends SimpleClientHttpRequestFactory {

    @Override
    protected void prepareConnection(HttpURLConnection connection, String httpMethod)
    throws IOException {
    if (connection instanceof HttpsURLConnection) {
    prepareHttpsConnection((HttpsURLConnection) connection);
    }
    super.prepareConnection(connection, httpMethod);
    }

    private void prepareHttpsConnection(HttpsURLConnection connection) {
    connection.setHostnameVerifier(new SkipHostnameVerifier());
    try {
    connection.setSSLSocketFactory(createSslSocketFactory());
    }
    catch (Exception ex) {
    // Ignore
    }
    }

    private SSLSocketFactory createSslSocketFactory() throws Exception {
    SSLContext context = SSLContext.getInstance("TLS");
    context.init(null, new TrustManager[] { new SkipX509TrustManager() },
    new SecureRandom());
    return context.getSocketFactory();
    }

    private class SkipHostnameVerifier implements HostnameVerifier {

    @Override
    public boolean verify(String s, SSLSession sslSession) {
    return true;
    }

    }

    private static class SkipX509TrustManager implements X509TrustManager {

    @Override
    public X509Certificate[] getAcceptedIssuers() {
    return new X509Certificate[0];
    }

    @Override
    public void checkClientTrusted(X509Certificate[] chain, String authType) {
    }

    @Override
    public void checkServerTrusted(X509Certificate[] chain, String authType) {
    }

    }
    }

    \n
  2. \n
  3. \n

    使用 restTemplate 发起请求前先设置连接和超时时间或者通过容器加载配置类然后设置超时时间
    \n

    1
    2
    3
    4
    5
    6
    //配置http请求的连接超时时间和读取超时时间
    HttpsClientRequestFactory factory = new HttpsClientRequestFactory();
    factory.setConnectTimeout(60 * 1000);
    factory.setReadTimeout(5 * 60 * 1000);
    RestTemplate restTemplate = new RestTemplate(factory);
    BaseResult<QueryInventoryResult> result = restTemplate.postForObject(address.concat(inventoryUrl), request, Result.class);

    \n
    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    15
    16
    17
    18
    19
    @Configuration
    public class RestConfig {

    //60 * 1000
    @Value("${rest.connectTimeout:60000}")
    private int connectTimeout;

    //5 * 60 * 1000
    @Value("${rest.readTimeout:300000}")
    private int readTimeout;

    @Bean
    public RestTemplate restTemplate() {
    SimpleClientHttpRequestFactory simpleClientHttpRequestFactory = new SimpleClientHttpRequestFactory();
    simpleClientHttpRequestFactory.setConnectTimeout(connectTimeout);
    simpleClientHttpRequestFactory.setReadTimeout(readTimeout);
    RestTemplate restTemplate = new RestTemplate(simpleClientHttpRequestFactory);
    return restTemplate;
    }

    \n
  4. \n
\n
# 第二种
\n

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
@Configuration
public class RestConfig {

//60 * 1000
@Value("${rest.connectTimeout:60000}")
private int connectTimeout;

//5 * 60 * 1000
@Value("${rest.readTimeout:300000}")
private int readTimeout;

@Value("${rest.connectionRequestTimeout:300000}")
private int connectionRequestTimeout;

/**
* 使用 HttpComponentsClientHttpRequestFactory创建http请求(推荐)
*/
@Bean
public RestTemplate restTemplate() {
HttpComponentsClientHttpRequestFactory httpRequestFactory = new HttpComponentsClientHttpRequestFactory();
httpRequestFactory.setConnectionRequestTimeout(connectionRequestTimeout);
httpRequestFactory.setConnectTimeout(connectTimeout);
httpRequestFactory.setReadTimeout(readTimeout);
return new RestTemplate(httpRequestFactory);

}
}

\n
# 第三种(基于第二种升级)
\n

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
@Configuration
public class RestConfig {


/**
* 高并发采用HttpClient连接池
*/
@Bean
public RestTemplate restTemplate() {
return new RestTemplate(httpRequestFactory());
}


@Bean
public ClientHttpRequestFactory httpRequestFactory() {
return new HttpComponentsClientHttpRequestFactory(httpClient());
}


@Bean
public HttpClient httpClient() {
Registry<ConnectionSocketFactory> registry = RegistryBuilder.<ConnectionSocketFactory>create()
.register("http", PlainConnectionSocketFactory.getSocketFactory())
.register("https", SSLConnectionSocketFactory.getSocketFactory())
.build();
PoolingHttpClientConnectionManager connectionManager = new PoolingHttpClientConnectionManager(registry);
//设置整个连接池最大连接数
connectionManager.setMaxTotal(1000);
\t\t\t//路由是对maxTotal的细分
connectionManager.setDefaultMaxPerRoute(100);
//定义不活动的时间(毫秒),超过的连接从连接池拿取需要重新验证
connectionManager.setValidateAfterInactivity(200);
RequestConfig requestConfig = RequestConfig.custom()
.setSocketTimeout(30000) //返回数据的超时时间
.setConnectTimeout(20000) //连接上服务器的超时时间
.setConnectionRequestTimeout(1000) //从连接池中获取连接的超时时间
.build();
return HttpClientBuilder.create()
.setDefaultRequestConfig(requestConfig)
.setConnectionManager(connectionManager)
.evictIdleConnections(2, TimeUnit.SECONDS) //保持空闲的最大时间
.build();
}
}

\n", "tags": [ "工作", "解决问题", "RestTemplate" ] }, { "id": "https://hitoli.com/2023/05/25/%E8%A7%A3%E5%86%B3MySQL%E6%8A%A5only-full-group-by%E9%94%99%E8%AF%AF/", "url": "https://hitoli.com/2023/05/25/%E8%A7%A3%E5%86%B3MySQL%E6%8A%A5only-full-group-by%E9%94%99%E8%AF%AF/", "title": "解决MySQL报only_full_group_by错误", "date_published": "2023-05-25T10:25:00.000Z", "content_html": "

# 问题描述

\n

当我们迁移到 MySQL 5.7+ 的版本时,常会碰到 ERROR 1055 only_full_group_by 错误,这是 5.7 之后 SQL_MODE 默认打开了严格模式导致的错误。说明你代码里有地方写的不严谨。

\n

# 解决方法

\n
    \n
  1. 重写 sql
  2. \n
  3. 返回宽松模式
    \n
    1
    2
    3
    4
    5
    #查询sql_mode
    select @@GLOBAL.sql_mode;
    #删除ONLY_FULL_GROUP_BY
    #设置sql_mode
    set @@GLOBAL.sql_mode='STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION';
  4. \n
  5. 使用聚合函数
    \n如果某些特别的原因就是要查询未分组字段,但你又没空改代码,那么我们可使用聚合函数来规避这类错误,算是一种折中的方案了,语法改严谨了,代码又不需要大动。如 ANY_VALUE ()、MAX ()、MIN () 或者 GROUP_CONCAT () 等聚合函数。
  6. \n
\n", "tags": [ "工作", "解决问题", "mysql" ] }, { "id": "https://hitoli.com/2023/05/25/MySQL%E4%B8%AD%E7%9A%84any-value-%E5%87%BD%E6%95%B0/", "url": "https://hitoli.com/2023/05/25/MySQL%E4%B8%AD%E7%9A%84any-value-%E5%87%BD%E6%95%B0/", "title": "MySQL中的any_value()函数", "date_published": "2023-05-25T10:07:00.000Z", "content_html": "

# 问题

\n

业务要求:查询所有省份:
\n\"\"

\n

# 方法

\n
    \n
  1. \n

    distinct 排除重复
    \n

    1
    2
    3
    4
    5
    SELECT
    \tDISTINCT(province_code),
    \tprovince_name
    FROM
    \tt_mip_base_area

    \n
  2. \n
  3. \n

    group by 根据省份编码分组
    \n

    1
    2
    3
    4
    5
    SELECT 
    \tprovince_code,
    \tany_value(province_name)
    FROM t_mip_base_area
    GROUP BY province_code

    \n
  4. \n
\n
# 若这样写
\n

1
2
3
4
5
SELECT 
province_code,
province_name
FROM t_mip_base_area
GROUP BY province_code

\n 则会报错
\n\"\"

\n

# 总结:

\n
    \n
  1. \n

    MySQL5.7 之后,sql_mode 中 ONLY_FULL_GROUP_BY 模式默认设置为打开状态。

    \n
  2. \n
  3. \n

    ONLY_FULL_GROUP_BY 的语义就是确定 select target list 中的所有列的值都是明确语义,简单的说来,在此模式下,target list 中的值要么是来自于聚合函数(sum、avg、max 等)的结果,要么是来自于 group by list 中的表达式的值

    \n
  4. \n
  5. \n

    MySQL 提供了 any_value () 函数来抑制 ONLY_FULL_GROUP_BY 值被拒绝

    \n
  6. \n
  7. \n

    any_value () 会选择被分到同一组的数据里第一条数据的指定列值作为返回数据

    \n
  8. \n
\n", "tags": [ "工作", "解决问题", "mysql" ] }, { "id": "https://hitoli.com/2023/04/16/%E5%91%BD%E4%BB%A4%E5%A4%A7%E6%9D%82%E7%83%A9/", "url": "https://hitoli.com/2023/04/16/%E5%91%BD%E4%BB%A4%E5%A4%A7%E6%9D%82%E7%83%A9/", "title": "命令大杂烩", "date_published": "2023-04-16T05:32:00.000Z", "content_html": "

# 日常命令记录

\n

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
#windows系统修复
DISM.exe /Online /Cleanup-image /Scanhealth
DISM.exe /Online /Cleanup-image /Restorehealth
sfc /scannow

#windows网络修复
netsh winsock reset

#合并ts文件
copy /b c:\\Users\\hito\\Downloads\\*.ts d:\\50.mp4

#windows11家庭版安装Hyper-V(以下命令拷贝到txt文本后重命名为bat文件然后用管理员权限运行)
pushd "%~dp0"
dir /b %SystemRoot%\\servicing\\Packages\\*Hyper-V*.mum >> hyper-v.txt
for /f %%i in ('findstr /i . hyper-v.txt 2^>nul') do dism /online /norestart /add-package:"%SystemRoot%\\servicing\\Packages\\%%i"
del hyper-v.txt
Dism /online /enable-feature /featurename:Microsoft-Hyper-V-All /LimitAccess /ALL

#mysql查看索引使用情况
EXPLAIN SELECT A.XXX, B.XXX FROM TABLE1 A JOIN TABLE2 B ON A.XXX = B.XXX;

#mysql使用触发器插入数据自动生产uuid主键
insert_before BEFORE 插入
BEGIN
\t\t\tSET NEW.ID = REPLACE(UUID(),"-","");
END

#excel
#合并
=CONCATENATE("EX010",D2,C2,"1")
#截断
=LEFT(E2,6)
#判断取值
=IF(O2="宜都市","420500000000000",IF(O2="高新区","420592000000000",IF(O2="枝江市","420583000000000",IF(O2="当阳市","420582000000000",IF(O2="远安县","420525000000000",IF(O2="兴山县","420526000000000",IF(O2="秭归县","420527000000000",IF(O2="长阳","420528000000000",IF(O2="五峰县","420529000000000",IF(O2="夷陵区","420506000000000",IF(O2="点军区","420504000000000",IF(O2="西陵区","420502000000000",IF(OR(ISNUMBER(FIND({"伍家";"伍家区";"伍家岗区"},O2))),"420503000000000",IF(OR(ISNUMBER(FIND({"猇亭";"猇亭区"},O2))),"420505000000000"))))))))))))))
#经纬度计算
=LEFT(T2,FIND("°",T2)-1)+MID(T2,FIND("°",T2)+1,FIND("′",T2)-FIND("°",T2)-1)/60+MID(T2,FIND("′",T2)+1,FIND("″",T2)-FIND("′",T2)-1)/3600

\n", "tags": [ "生活", "技术分享", "命令" ] } ] }