如何高效利用Python实现图片库中重复图片的查找与删除?
- 内容介绍
- 文章标签
- 相关推荐
本文共计808个文字,预计阅读时间需要4分钟。
本示例代码展示了如何使用Python查找并删除重复的图片,用于解决从网上爬取图片时的大量重复问题,既可以单独使用,也可以与网络爬虫配合。第一份代码基于文件内容哈希识别完全相同的图片;文中第二份基于感知哈希的代码还支持识别尺寸不同但内容一致的图片。
import os
import hashlib

from PIL import Image
def get_image_hash(image_path):
    """Return the MD5 hex digest of an image's decoded pixel data.

    Two files hash equally iff their decoded pixel bytes are identical
    (so re-encodings of the same pixels match, but resized copies do not).

    :param image_path: path to an image file readable by Pillow
    :return: 32-character hex string
    """
    img = Image.open(image_path)
    hash_obj = hashlib.md5()
    # Bug fix: iterating img.tobytes() yields ints in Python 3 and
    # hashlib's update() rejects ints — feed the whole buffer at once.
    hash_obj.update(img.tobytes())
    return hash_obj.hexdigest()
def find_duplicate_images(directory):
    """Recursively scan *directory* and group image files by content hash.

    :param directory: root directory to walk
    :return: dict mapping hash -> list of file paths, containing only
        hashes shared by two or more files (i.e. actual duplicates)
    """
    hash_dict = {}
    # Only files with these extensions are hashed; others are skipped.
    image_exts = ('.png', '.jpg', '.jpeg', '.bmp', '.gif')
    for root, dirs, files in os.walk(directory):
        for file in files:
            if file.lower().endswith(image_exts):
                file_path = os.path.join(root, file)
                file_hash = get_image_hash(file_path)
                # setdefault replaces the manual "in dict" check/else pair.
                hash_dict.setdefault(file_hash, []).append(file_path)
    # Keep only groups with more than one member — those are duplicates.
    return {key: value for key, value in hash_dict.items() if len(value) > 1}
def delete_duplicates(duplicates):
    """Delete duplicate image files, keeping the FIRST path of each group.

    :param duplicates: mapping of hash -> list of duplicate file paths,
        as returned by find_duplicate_images()
    """
    for duplicates_list in duplicates.values():
        # Bug fix: the original removed indices 0..n-2 and kept the LAST
        # file, contradicting the stated behavior ("keep the first copy").
        # Skip index 0 and remove the rest instead.
        for path in duplicates_list[1:]:
            os.remove(path)
# Usage example: scan a directory for duplicate images, then prune them.
directory = 'path_to_your_directory'
duplicates = find_duplicate_images(directory)
delete_duplicates(duplicates)
本文实例为大家分享了python查找重复图片并删除的具体代码,供大家参考,具体内容如下
和网络爬虫配套的,也可单独使用。从网上爬下来的图片重复太多,代码支持识别尺寸不同但内容一致的图片,并把重复的图片删除,只保留第一份。
# -*- coding: utf-8 -*-
import os
import cv2
import numpy as np


def _image_bits(img):
    """Return the 64-element average-hash bit vector of a BGR image.

    The image is shrunk to 8x8 and converted to grayscale; each pixel
    becomes 1 if it is >= the grayscale mean, else 0.  The Hamming
    distance between two such vectors measures visual similarity, so
    images of different sizes but identical content compare equal.
    """
    small = cv2.resize(img, (8, 8))
    gray = cv2.cvtColor(small, cv2.COLOR_BGR2GRAY)
    mean = cv2.mean(gray)[0]
    # Vectorized threshold; flatten() guarantees a 1-D 64-entry vector
    # (the original's np.array(gray.data) is ambiguous under Python 3).
    return (gray.flatten() >= mean).astype(np.uint8)


def cmpandremove2(path):
    """Remove near-duplicate images under *path*, keeping the first of each group.

    Hashes every image once, then deletes files whose bit vectors differ
    from an earlier file's in at most 5 of 64 positions.

    Ported from Python 2: print statements -> print(), `type(x) is
    types.NoneType` -> `is None`, and `dict.keys().sort()` (invalid on a
    Py3 view) -> sorted(); the `dict` name also shadowed the builtin.
    """
    names = sorted(os.listdir(path))
    if not names:
        return
    hashes = {}
    for name in names:
        filepath = path + "/" + name
        img = cv2.imread(filepath)
        if img is None:  # unreadable file or not an image — skip
            continue
        print("get", filepath)
        hashes[name] = _image_bits(img)
    keys = sorted(hashes)
    index = 0
    while index < len(keys):
        curkey = keys[index]
        print(curkey)
        cur_bits = hashes[curkey]
        # Compare the current image against every later one still present.
        dellist = [k for k in keys[index:]
                   if k != curkey
                   and int(np.count_nonzero(hashes[k] != cur_bits)) <= 5]
        for k in dellist:
            filepath = path + "/" + k
            print("remove", filepath)
            os.remove(filepath)
            hashes.pop(k)
        keys = sorted(hashes)  # refresh after deletions
        index += 1


def cmpandremove(path):
    """One pass of near-duplicate removal; returns 1 if any file was removed, else 0.

    Re-hashes images on every comparison (slower than cmpandremove2) and
    re-lists the directory after each round of deletions.
    """
    flag = 0
    dirs = sorted(os.listdir(path))
    if not dirs:
        return 0
    index = 0
    while index < len(dirs):
        # os.path.join replaces the original's `path + name`, which
        # silently required *path* to end with a slash.
        prepath = os.path.join(path, dirs[index])
        print(prepath)
        preimg = cv2.imread(prepath)
        if preimg is None:
            index += 1
            continue
        prebits = _image_bits(preimg)
        removepath = []
        for index2 in range(len(dirs)):
            if index2 == index:
                continue
            curpath = os.path.join(path, dirs[index2])
            curimg = cv2.imread(curpath)
            if curimg is None:
                continue
            if int(np.count_nonzero(_image_bits(curimg) != prebits)) <= 5:
                print('the same')
                removepath.append(curpath)
                flag = 1
        index += 1
        if removepath:
            for file_path in removepath:
                print("remove", file_path)
                os.remove(file_path)
            dirs = sorted(os.listdir(path))
            if not dirs:
                return 0
    return flag


if __name__ == "__main__":
    # Guarded so importing this module no longer deletes files as a side effect.
    path = 'pics/'
    cmpandremove(path)
以上就是本文的全部内容,希望对大家的学习有所帮助,也希望大家多多支持易盾网络。
本文共计808个文字,预计阅读时间需要4分钟。
本示例代码展示了如何使用Python查找并删除重复的图片,用于解决从网上爬取图片时的大量重复问题,既可以单独使用,也可以与网络爬虫配合。第一份代码基于文件内容哈希识别完全相同的图片;文中第二份基于感知哈希的代码还支持识别尺寸不同但内容一致的图片。
pythonimport osimport hashlibfrom PIL import Image
def get_image_hash(image_path):
    """Return the MD5 hex digest of an image's decoded pixel data.

    Two files hash equally iff their decoded pixel bytes are identical
    (so re-encodings of the same pixels match, but resized copies do not).

    :param image_path: path to an image file readable by Pillow
    :return: 32-character hex string
    """
    img = Image.open(image_path)
    hash_obj = hashlib.md5()
    # Bug fix: iterating img.tobytes() yields ints in Python 3 and
    # hashlib's update() rejects ints — feed the whole buffer at once.
    hash_obj.update(img.tobytes())
    return hash_obj.hexdigest()
def find_duplicate_images(directory):
    """Recursively scan *directory* and group image files by content hash.

    :param directory: root directory to walk
    :return: dict mapping hash -> list of file paths, containing only
        hashes shared by two or more files (i.e. actual duplicates)
    """
    hash_dict = {}
    # Only files with these extensions are hashed; others are skipped.
    image_exts = ('.png', '.jpg', '.jpeg', '.bmp', '.gif')
    for root, dirs, files in os.walk(directory):
        for file in files:
            if file.lower().endswith(image_exts):
                file_path = os.path.join(root, file)
                file_hash = get_image_hash(file_path)
                # setdefault replaces the manual "in dict" check/else pair.
                hash_dict.setdefault(file_hash, []).append(file_path)
    # Keep only groups with more than one member — those are duplicates.
    return {key: value for key, value in hash_dict.items() if len(value) > 1}
def delete_duplicates(duplicates):
    """Delete duplicate image files, keeping the FIRST path of each group.

    :param duplicates: mapping of hash -> list of duplicate file paths,
        as returned by find_duplicate_images()
    """
    for duplicates_list in duplicates.values():
        # Bug fix: the original removed indices 0..n-2 and kept the LAST
        # file, contradicting the stated behavior ("keep the first copy").
        # Skip index 0 and remove the rest instead.
        for path in duplicates_list[1:]:
            os.remove(path)
# Usage example: scan a directory for duplicate images, then prune them.
directory = 'path_to_your_directory'
duplicates = find_duplicate_images(directory)
delete_duplicates(duplicates)
本文实例为大家分享了python查找重复图片并删除的具体代码,供大家参考,具体内容如下
和网络爬虫配套的,也可单独使用。从网上爬下来的图片重复太多,代码支持识别尺寸不同但内容一致的图片,并把重复的图片删除,只保留第一份。
# -*- coding: utf-8 -*-
import os
import cv2
import numpy as np


def _image_bits(img):
    """Return the 64-element average-hash bit vector of a BGR image.

    The image is shrunk to 8x8 and converted to grayscale; each pixel
    becomes 1 if it is >= the grayscale mean, else 0.  The Hamming
    distance between two such vectors measures visual similarity, so
    images of different sizes but identical content compare equal.
    """
    small = cv2.resize(img, (8, 8))
    gray = cv2.cvtColor(small, cv2.COLOR_BGR2GRAY)
    mean = cv2.mean(gray)[0]
    # Vectorized threshold; flatten() guarantees a 1-D 64-entry vector
    # (the original's np.array(gray.data) is ambiguous under Python 3).
    return (gray.flatten() >= mean).astype(np.uint8)


def cmpandremove2(path):
    """Remove near-duplicate images under *path*, keeping the first of each group.

    Hashes every image once, then deletes files whose bit vectors differ
    from an earlier file's in at most 5 of 64 positions.

    Ported from Python 2: print statements -> print(), `type(x) is
    types.NoneType` -> `is None`, and `dict.keys().sort()` (invalid on a
    Py3 view) -> sorted(); the `dict` name also shadowed the builtin.
    """
    names = sorted(os.listdir(path))
    if not names:
        return
    hashes = {}
    for name in names:
        filepath = path + "/" + name
        img = cv2.imread(filepath)
        if img is None:  # unreadable file or not an image — skip
            continue
        print("get", filepath)
        hashes[name] = _image_bits(img)
    keys = sorted(hashes)
    index = 0
    while index < len(keys):
        curkey = keys[index]
        print(curkey)
        cur_bits = hashes[curkey]
        # Compare the current image against every later one still present.
        dellist = [k for k in keys[index:]
                   if k != curkey
                   and int(np.count_nonzero(hashes[k] != cur_bits)) <= 5]
        for k in dellist:
            filepath = path + "/" + k
            print("remove", filepath)
            os.remove(filepath)
            hashes.pop(k)
        keys = sorted(hashes)  # refresh after deletions
        index += 1


def cmpandremove(path):
    """One pass of near-duplicate removal; returns 1 if any file was removed, else 0.

    Re-hashes images on every comparison (slower than cmpandremove2) and
    re-lists the directory after each round of deletions.
    """
    flag = 0
    dirs = sorted(os.listdir(path))
    if not dirs:
        return 0
    index = 0
    while index < len(dirs):
        # os.path.join replaces the original's `path + name`, which
        # silently required *path* to end with a slash.
        prepath = os.path.join(path, dirs[index])
        print(prepath)
        preimg = cv2.imread(prepath)
        if preimg is None:
            index += 1
            continue
        prebits = _image_bits(preimg)
        removepath = []
        for index2 in range(len(dirs)):
            if index2 == index:
                continue
            curpath = os.path.join(path, dirs[index2])
            curimg = cv2.imread(curpath)
            if curimg is None:
                continue
            if int(np.count_nonzero(_image_bits(curimg) != prebits)) <= 5:
                print('the same')
                removepath.append(curpath)
                flag = 1
        index += 1
        if removepath:
            for file_path in removepath:
                print("remove", file_path)
                os.remove(file_path)
            dirs = sorted(os.listdir(path))
            if not dirs:
                return 0
    return flag


if __name__ == "__main__":
    # Guarded so importing this module no longer deletes files as a side effect.
    path = 'pics/'
    cmpandremove(path)
以上就是本文的全部内容,希望对大家的学习有所帮助,也希望大家多多支持易盾网络。

