#!/usr/bin/env python
#-*- coding:UTF-8 -*-
"""
@Item : Rsync Backup
@Author : Villiam Sheng
@Group : Linux Group
@Date : 2012-08-13
@Funtion:
rsync_conf: Configuration rsync server, and the server
#res[os.path.join(root,fpath)] = int((time.time() - os.stat(os.path.join(root,fpath)).st_ctime) / 86910)
#int((time.time() - os.stat(os.path.join(root,fpath)).st_ctime) / 86910)
#try:
# res[os.path.join(root,fpath)] = time.strptime((fpath.split('_')[2]),'%Y%m%d%H%M%S').tm_yday #截取文件名,得到时间天数,
#except Exception,e:
# print e
#res[os.path.join(root,fpath)] = int((time.time() - os.stat(os.path.join(root,fpath)).st_ctime) / 86910)
Central idea:
"""
import base64
import ConfigParser
import fcntl
import json
import os
import re
import socket
import struct
import sys
import threading
import time
import traceback
from statvfs import F_BLOCKS, F_BAVAIL, F_BSIZE

from rsync_log import rsync_log
from rsync_post import rsync_post
# Current disk-usage percentage; updated by rsync_thread.run() via
# `global pcg` and read by the __main__ loop at the bottom of the file.
pcg = 0

# Fibonacci numbers no greater than 365: backups whose ctime falls on one
# of these day-of-year values are kept, everything else is a deletion
# candidate.
# Fix: the original loop also appended the first Fibonacci value past the
# bound (377), which contradicted the intended "<= 365" cut-off and can
# never match a tm_yday (max 366).
lists = []
a, b = 1, 2
while a <= 365:
    lists.append(a)
    a, b = b, a + b
class rsync_thread(threading.Thread):
    """Prune aged backup files under ``path`` on a Fibonacci schedule.

    Each file's ctime is reduced to a day-of-year number.  Files whose
    day number appears in the module-level Fibonacci list ``lists`` are
    kept; the rest are deletion candidates, removed smallest day number
    first, and only while the disk stays more than 91% full.
    """

    def __init__(self, path):
        # path: root directory (a backup mount point) this thread prunes.
        threading.Thread.__init__(self)
        self.log = rsync_log()
        self.path = path

    """ 计算当前磁盘的使用百分比"""
    # (Original note above: compute the current disk usage percentage.)
    def disk(self):
        # Return the used percentage of the filesystem holding self.path
        # as a string such as '87.5'.
        try:
            vfs = os.statvfs(self.path)
            # Capacity figures truncated to whole GiB.
            disk_full = int(vfs[F_BLOCKS] * vfs[F_BSIZE] / 1024 / 1024 / 1024)
            disk_free = int(vfs[F_BAVAIL] * vfs[F_BSIZE] / 1024 / 1024 / 1024)
            return '%.1f' % (float(disk_full - disk_free) / disk_full * 100)
        except:
            # NOTE(review): traceback.print_exc() prints and returns None,
            # so the logged payload and the return value are both None here;
            # run() will then fail on float(None).
            self.log.log_info('rsync_info.err', 'dfile.disk', traceback.print_exc())
            return traceback.print_exc()

    def run(self):
        global pcg                # module-level disk-usage percentage
        old_df = []               # delete candidates created in an earlier year
        new_df = []               # delete candidates created this year
        sf = []                   # day numbers of the files being kept
        res = {}                  # file path -> day-of-year of its ctime
        rs = 0                    # total bytes freed by deletions
        size = []                 # sizes of the files actually deleted
        msize = []                # sizes of today's backups (currently unused)
        tday_size = []            # today's backup sizes (currently unused)
        ms = 0                    # total bytes added today (never updated)
        year = time.localtime().tm_year
        """ 得到文件的天数,以文件名作为key,天数作为value """
        # Map every file under self.path to the day-of-year of its ctime.
        for root, dirs, files in os.walk(self.path):
            for fpath in files:
                res[os.path.join(root, fpath)] = time.localtime(os.stat(os.path.join(root, fpath)).st_ctime).tm_yday
        """ 判断文件的天数是否符合斐波那契数列,符合条件append到sf列表中,不符合append df列表中 """
        # Files on a Fibonacci day are kept (sf); all others go to old_df
        # or new_df depending on whether they were created this year.
        for v, k in res.items():
            if k in lists:
                sf.append(k)
                self.log.log_info('log_info.save', 'dfile.rsync_thread', '%s:::%s' % (v, k))
            elif k not in lists:
                if year != time.localtime(os.stat(v).st_ctime).tm_year:
                    old_df.append({v: k})
                else:
                    new_df.append({v: k})
        """
        try:
            for s in range(len(new_df)):
                for f,k in new_df[s].items():
                    tday_size.append(k)
                    if max({}.fromkeys(tday_size).keys()) == k:
                        msize.append(os.path.getsize(f))
        except:
            pass
        """
        c = []
        pcg = float(self.disk())
        """ 判断今天是否有新的文件备份,在删除的列表中删除最后一天的数据,但必须保证磁盘的使用百分比大于 %55 """
        # Only prune when at least one file was backed up today; previous
        # years' candidates are pruned before this year's, and deletion
        # stops as soon as disk usage drops to 91% or below.
        if time.localtime().tm_yday in res.values():
            if len(old_df) != 0:
                # Collect all candidate day numbers so min() below always
                # targets the smallest (oldest) day.
                for s in range(len(old_df)):
                    for f, k in old_df[s].items():
                        c.append(k)
                for s in range(len(old_df)):
                    for f, k in old_df[s].items():
                        # Delete only files on the smallest candidate day,
                        # and only while usage is still above 91%.
                        if min({}.fromkeys(c).keys()) == k and pcg > 91:
                            size.append(os.path.getsize(f))
                            # NOTE(review): path interpolated into a shell
                            # command -- unsafe if a file name contains
                            # shell metacharacters.
                            os.system('rm -frv %s' % f)
                            self.log.log_info('log_info.delete', 'remove cmd', 'rm -frv %s %s' % (f, k))
                        elif pcg <= 91:
                            break
                        # Refresh the usage figure after each attempt.
                        pcg = float(self.disk())
            elif len(new_df) != 0:
                # Same procedure for this year's candidates.
                for s in range(len(new_df)):
                    for f, k in new_df[s].items():
                        c.append(k)
                for s in range(len(new_df)):
                    for f, k in new_df[s].items():
                        if min({}.fromkeys(c).keys()) == k and pcg > 91:
                            size.append(os.path.getsize(f))
                            os.system('rm -frv %s' % f)
                            self.log.log_info('log_info.delete', 'remove cmd', 'rm -frv %s %s' % (f, k))
                        elif pcg <= 91:
                            break
                        pcg = float(self.disk())
            for s in size:
                rs += s
            #for m in msize:
            #    ms += m
            self.log.log_info('log_info.delete', 'Disk release %s %s MB' % (self.path, rs / 1024 / 1024), 'Disk append %s %s MB' % (self.path, ms / 1024 / 1024))
        else:
            self.log.log_info('log_info.delete', 'Disk files ', ' %s No update file' % self.path)
        # Terminates only this thread: SystemExit raised in run() is
        # handled by the threading machinery.
        sys.exit()
class rsync_dfile(object):
    """Driver: prune every backup mount, then report per-module disk stats.

    work() scans /proc/mounts, starts one rsync_thread per eligible
    backup filesystem, waits for all of them, then posts disk usage for
    every module in /etc/rsyncd.conf (flag 0 on success, flag 1 on
    failure) to the collector via rsync_post.
    """

    def __init__(self):
        self.log = rsync_log()
        self.rsync_post = rsync_post()

    def _local_ip(self):
        """Return the IPv4 address of eth0, falling back to eth1.

        Uses the SIOCGIFADDR ioctl (0x8915); the UDP socket is only a
        handle for the ioctl and is always closed.
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            try:
                return socket.inet_ntoa(fcntl.ioctl(
                    s.fileno(), 0x8915, struct.pack('24s', 'eth0'))[20:24])
            except IOError:
                return socket.inet_ntoa(fcntl.ioctl(
                    s.fileno(), 0x8915, struct.pack('24s', 'eth1'))[20:24])
        finally:
            s.close()  # fix: the original leaked one socket per section

    def _is_backup_mount(self, line):
        """Return True when a /proc/mounts line describes a prunable mount.

        A mount qualifies when it looks like a data filesystem
        ('data=ordered', 'mfs' or 'nfs' in the line), is not the sunrpc
        pseudo mount, is really a mount point, and is none of the system
        paths /, /root or /var.
        """
        fields = line.split()
        if len(fields) < 2:
            return False
        mpoint = str(fields[1])
        if line.find('data=ordered') == -1 and line.find('mfs') == -1 \
                and line.find('nfs') == -1:
            return False
        if line.find('sunrpc') != -1:
            return False
        # System mounts (and any single-character path) are never pruned.
        if mpoint in ('/', '/root', '/var') or len(mpoint) == 1:
            return False
        return os.path.ismount(mpoint)

    def work(self):
        """Run one full prune-and-report cycle (see class docstring)."""
        # fix: original opened /proc/mounts without `with` / try-finally.
        with open('/proc/mounts', 'r') as fp:
            m_info = fp.readlines()
        data = {}
        sections = []
        # One pruning thread per eligible backup mount point.
        for line in m_info:
            if self._is_backup_mount(line):
                rsync_thread(line.split()[1]).start()
        # Wait until every pruning thread has finished.
        while threading.active_count() > 1:
            time.sleep(1)
        conf = ConfigParser.ConfigParser()
        conf.read('/etc/rsyncd.conf')
        # Defaults so the failure report below can never hit a NameError
        # (the original referenced ip/i/disk_full in the except branch
        # even when the exception fired before they were assigned).
        ip = ''
        i = ''
        disk_full = disk_free = 0
        try:
            # Loop-invariant: look the address up once, not per section.
            ip = self._local_ip()
            for i in conf.sections():
                if i != 'global':
                    sections.append(i)
            for i in sections:
                vfs = os.statvfs(conf.get(i, 'path'))
                # Sizes in GiB, truncated (same convention as rsync_thread).
                disk_full = int(vfs[F_BLOCKS] * vfs[F_BSIZE] / 1024 / 1024 / 1024)
                disk_free = int(vfs[F_BAVAIL] * vfs[F_BSIZE] / 1024 / 1024 / 1024)
                t_info = {'flag': 0, 'store_ip': ip, 'store_module': i,
                          'store_path': conf.get(i, 'path'),
                          'disk_full': disk_full, 'disk_free': disk_free,
                          'action': 'rsync_renew'}
                data['param'] = base64.b64encode(json.dumps(t_info))
                self.rsync_post.work(data)
                self.log.log_info('rsync_info.err', 'dfile.work', t_info)
        except Exception as e:
            # Best-effort failure report: flag 1 plus whatever values were
            # computed before the exception.
            try:
                store_path = conf.get(i, 'path')
            except Exception:
                store_path = ''
            t_info = {'flag': 1, 'store_ip': ip, 'store_module': i,
                      'store_path': store_path,
                      'disk_full': disk_full, 'disk_free': disk_free,
                      'action': 'rsync_renew'}
            data['param'] = base64.b64encode(json.dumps(t_info))
            self.rsync_post.work(data)
            self.log.log_info('rsync_info.err', 'dfile.work', e)
if __name__ == "__main__":
    # Repeat whole sweeps until the pruning threads have brought the
    # disk-usage percentage (module global pcg) down to 91% or below.
    dfile = rsync_dfile()
    while True:
        dfile.work()
        if pcg <= 91:
            break
# Source: 51CTO blog (swq499809608), http://blog.51cto.com/swq499809608/1313815