Logstash incremental sync from PostgreSQL: new data not being picked up

1. Scenario

Logstash database input configuration, scheduled to run every minute:

input {
  # bms
  jdbc {
    jdbc_driver_library => "../db/postgresql-9.4-1206-jdbc42.jar"
    jdbc_driver_class => "org.postgresql.Driver"
    jdbc_connection_string => "jdbc:postgresql://xx.xx.xx.xx:5432/portal_test?currentSchema=public"
    jdbc_user => "postgres"
    jdbc_password => "xx"
    jdbc_paging_enabled => true
    jdbc_page_size => "1000"
    # timezone
    jdbc_default_timezone => "Asia/Shanghai"
    # sync once per minute
    schedule => "* * * * *"
    # reset :sql_last_value on every Logstash start
    clean_run => true
    statement_filepath => "../db/bms.sql"
    #use_column_value => true
    type => "bms"
  }
}


filter {
  json {
    source => "message"
    remove_field => ["message"]
  }
}


output {
  if [type] == "bms" {
    elasticsearch {
      hosts => ["xx.xx.xx.xx:9200"]
      index => "%{type}"
      document_id => "%{id}"
      document_type => "%{type}"
    }
  }
  stdout {
    codec => json_lines
  }
}

Incremental sync script (bms.sql):

SELECT
	bms_meeting_detail.ID AS ID,
	bms_meeting_detail.meeting_type AS title,
	bms_meeting.description AS content,
	bms_meeting.meeting_creator AS meetingcreator,
	bms_meeting_detail.meeting_start_time AS createtime,
	bms_meeting_detail.meeting_end_time AS updatetime,
	bms_meeting_detail.modifier AS modifier,
	bms_meeting_detail.participants AS participants,
	bms_meeting.is_delete AS isdelete,
	bms_meeting_detail.meeting_owner AS meetingowner,
	bms_meeting_detail.meeting_conclusion AS meetingconclusion,
	bms_meeting_detail.is_published AS ispublished,
	A.documentcontent AS documentcontent 
FROM
    bms_meeting
	LEFT JOIN bms_meeting_detail ON bms_meeting."id" = bms_meeting_detail.meeting_id
	LEFT JOIN (
	SELECT
		bms_meeting_links.meeting_detail_id AS id,
		-- PostgreSQL has no GROUP_CONCAT; string_agg is the equivalent aggregate
		string_agg ( bms_meeting_links.link_name, ',' ) AS documentcontent 
	FROM
		bms_meeting_links
		LEFT JOIN bms_meeting_detail ON bms_meeting_detail."id" = bms_meeting_links.meeting_detail_id 
	GROUP BY
		bms_meeting_links.meeting_detail_id 
	) AS A ON  A."id" = bms_meeting_detail."id" 
WHERE bms_meeting.is_delete =0 AND bms_meeting_detail.create_time > :sql_last_value 
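
For reference, since use_column_value is left commented out, :sql_last_value is the time the previous scheduled query ran (and clean_run => true resets it to the epoch on startup); Logstash substitutes that value into the statement before executing it. A sketch of the effective condition for one run, with a hypothetical timestamp:

-- sketch only: the timestamp is hypothetical and Logstash adds its own paging wrapper
WHERE bms_meeting.is_delete = 0
	AND bms_meeting_detail.create_time > '2020-12-08 10:15:00'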

2. Result and Cause

Result
After starting Logstash, I made changes through the web page and watched the Logstash log in real time, but the newly created data was never captured.

Cause
The problem is the condition: bms_meeting_detail.create_time > :sql_last_value
Digging in, I found that :sql_last_value comes from the server's clock, and with the one-minute schedule each run only picks up data created within the minute before that timestamp;
the create_time values in the database did not fall within that one-minute window.
To verify, I immediately performed another operation to generate a new record and compared it against the system time: the create_time recorded by the database was 3 minutes behind.
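
A quick way to confirm this kind of skew (my own check, not from the original post; it reuses the connection placeholders above) is to compare the Logstash host's clock with the database clock and the newest create_time:

# on the Logstash host: the clock that :sql_last_value is derived from
date
# against the PostgreSQL server: database clock and the newest record
psql -h xx.xx.xx.xx -U postgres -d portal_test \
  -c "SELECT now() AS db_time, max(create_time) AS newest_create_time FROM bms_meeting_detail;"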

3. Solution

Synchronize the Linux system time with the database server's time:

yum install ntp           # install the NTP tools
ntpdate time.nist.gov     # step the system clock against a public NTP server
date                      # confirm the corrected time
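
An alternative worth noting (a sketch, not the fix used here): the input above has use_column_value commented out; enabling it and tracking create_time makes :sql_last_value follow the column's own values rather than the Logstash host's wall clock, so clock skew can no longer hide new rows. The statement would then also need to select bms_meeting_detail.create_time so the tracked column is present in the result set.

  jdbc {
    # ...same driver/connection settings as in the input above...
    schedule => "* * * * *"
    statement_filepath => "../db/bms.sql"
    use_column_value => true
    tracking_column => "create_time"     # assumes bms.sql also selects this column
    tracking_column_type => "timestamp"
    clean_run => false                   # keep the stored :sql_last_value across restarts
    type => "bms"
  }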

Reference:
https://zhidao.baidu.com/question/585857949.html?fr=iks&word=linux%D0%A3%D5%FD%B7%FE%CE%F1%C6%F7%CF%B5%CD%B3%CA%B1%BC%E4&ie=gbk

Reposted from blog.csdn.net/leinminna/article/details/110850014