Python Multithreaded Programming

Introduction:

Commonly used methods in Python multithreaded programming:

1. join() method: if a thread starts another thread during its execution and needs to wait for that thread to finish before continuing, it can call the started thread's join([timeout]) method. timeout is an optional argument: the maximum time to block waiting for the thread to finish.

2. isAlive() method: check whether the thread is still running.

3. getName() method: get the thread's name.

4. setDaemon() method: if child threads should exit together with the main thread when it exits, call setDaemon(True) on each child thread before starting it. A short usage sketch of these four methods is given below.
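
A minimal usage sketch of these four methods, in the same Python 2 style as the rest of this article (the thread name and sleep time are arbitrary choices for illustration):

import threading
import time

def work():
    time.sleep(2)              # pretend to do some work

if __name__ == '__main__':
    t = threading.Thread(target=work, name='worker-1')
    t.setDaemon(True)          # daemon thread: it dies when the main thread exits
    t.start()
    print t.getName()          # -> worker-1
    print t.isAlive()          # True while work() is still sleeping
    t.join(5)                  # wait at most 5 seconds for the thread to finish
    print t.isAlive()          # False once the thread has finished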

Python thread synchronization:

(1) Simple thread synchronization with threading's Lock and RLock:

import threading
import time
class mythread(threading.Thread):
    def __init__(self,threadname):
        threading.Thread.__init__(self,name=threadname)
    def run(self):
        global x
        lock.acquire()          # only one thread at a time may update and print x
        for i in range(3):
            x = x+1
        time.sleep(1)
        print x
        lock.release()

if __name__ == '__main__':
    lock = threading.RLock()    # a single re-entrant lock shared by all the threads
    t1 = []
    for i in range(10):
        t = mythread(str(i))
        t1.append(t)
    x = 0
    for i in t1:
        i.start()
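
The example above uses an RLock. The practical difference from a plain Lock is that an RLock can be acquired again by the thread that already holds it, so nested acquire calls do not deadlock. A minimal sketch of that distinction (the function names outer/inner are made up for illustration):

import threading

rlock = threading.RLock()

def inner():
    with rlock:          # the same thread re-acquires the RLock; a plain Lock would deadlock here
        print 'inner also holds the lock'

def outer():
    with rlock:          # first acquisition
        inner()

if __name__ == '__main__':
    outer()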

(2) Using a condition variable to keep threads synchronized:

# coding=utf-8
import threading

class Producer(threading.Thread):
    def __init__(self,threadname):
        threading.Thread.__init__(self,name=threadname)
    def run(self):
        global x
        con.acquire()
        # Re-check the predicate in a loop: wait() may return after another
        # thread has already changed x again.
        while x == 10000:
            con.wait()
        for i in range(10000):
            x = x+1
        con.notify()            # wake up a waiting consumer
        print x
        con.release()

class Consumer(threading.Thread):
    def __init__(self,threadname):
        threading.Thread.__init__(self,name=threadname)
    def run(self):
        global x
        con.acquire()
        # Wait until the producer has actually produced something.
        while x == 0:
            con.wait()
        for i in range(10000):
            x = x-1
        con.notify()            # wake up a waiting producer
        print x
        con.release()

if __name__ == '__main__':
    con = threading.Condition()
    x = 0
    p = Producer('Producer')
    c = Consumer('Consumer')
    p.start()
    c.start()
    p.join()
    c.join()
    print x
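
The acquire()/release() pairs above can also be written as a with block, which releases the condition even if an exception is raised. A minimal sketch of the consumer logic in that form, assuming the same globals con and x as in the example above:

def consume():
    global x
    with con:                   # acquire the condition; it is released on leaving the block
        while x == 0:
            con.wait()
        for i in range(10000):
            x = x - 1
        con.notify()
        print x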

(3) Using a queue to keep threads synchronized:

# coding=utf-8
import threading
import Queue
import time
import random

class Producer(threading.Thread):
    def __init__(self,threadname):
        threading.Thread.__init__(self,name=threadname)
    def run(self):
        global queue
        i = random.randint(1,5)
        queue.put(i)
        print self.getName(),' put %d to queue' %(i)
        time.sleep(1)

class Consumer(threading.Thread):
    def __init__(self,threadname):
        threading.Thread.__init__(self,name=threadname)
    def run(self):
        global queue
        item = queue.get()
        print self.getName(),' get %d from queue' %(item)
        time.sleep(1)

if __name__ == '__main__':
    queue = Queue.Queue()
    plist = []
    clist = []
    for i in range(3):
        p = Producer('Producer'+str(i))
        plist.append(p)
    for j in range(3):
        c = Consumer('Consumer'+str(j))
        clist.append(c)
    # Start every thread first so producers and consumers run concurrently,
    # then wait for all of them to finish.
    for pt in plist:
        pt.start()
    for ct in clist:
        ct.start()
    for pt in plist:
        pt.join()
    for ct in clist:
        ct.join()
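
Queue also provides task_done() and join(), so the main thread can block until every queued item has been processed instead of joining each worker thread. A minimal sketch of that pattern (the worker function here is made up for illustration):

import threading
import Queue

def worker(q):
    while True:
        item = q.get()          # blocks until an item is available
        print 'processing %d' % item
        q.task_done()           # tell the queue this item is finished

if __name__ == '__main__':
    q = Queue.Queue()
    t = threading.Thread(target=worker, args=(q,))
    t.setDaemon(True)           # daemon worker exits together with the main thread
    t.start()
    for i in range(5):
        q.put(i)
    q.join()                    # blocks until every queued item has been marked task_done()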

Another implementation of the producer/consumer pattern:

# coding=utf-8
import time
import threading
import Queue

class Consumer(threading.Thread):
    def __init__(self, queue):
        threading.Thread.__init__(self)
        self._queue = queue

    def run(self):
        while True:
            # queue.get() blocks the current thread until an item is retrieved.
            msg = self._queue.get()
            # Check whether the current message is the "quit" sentinel
            if isinstance(msg, str) and msg == 'quit':
                # if so, exit the loop
                break
            # "Processes" (or in our case, prints) the queue item
            print "I'm a thread, and I received %s!!" % msg
        # Always be friendly!
        print 'Bye byes!'

class Producer(threading.Thread):
    def __init__(self, queue):
        threading.Thread.__init__(self)
        self._queue = queue

    def run(self):
        # variable to keep track of when we started
        start_time = time.time()
        # While under 5 seconds..
        while time.time() - start_time < 5:
            # "Produce" a piece of work and stick it in the queue for the Consumer to process
            self._queue.put('something at %s' % time.time())
            # Sleep a bit just to avoid an absurd number of messages
            time.sleep(1)
        # This is the "quit" sentinel that tells the consumer thread to stop.
        self._queue.put('quit')

if __name__ == '__main__':
    queue = Queue.Queue()
    consumer = Consumer(queue)
    consumer.start()
    producer1 = Producer(queue)
    producer1.start()

An implementation using a thread pool plus a synchronized queue (Queue):

# A more realistic thread pool example
# coding=utf-8
import time 
import threading 
import Queue 
import urllib2 

class Consumer(threading.Thread): 
    def __init__(self, queue):
        threading.Thread.__init__(self)
        self._queue = queue 
 
    def run(self):
        while True: 
            content = self._queue.get() 
            if isinstance(content, str) and content == 'quit':
                break
            response = urllib2.urlopen(content)
        print 'Bye byes!'
 
def Producer():
    urls = [
        'http://www.python.org', 'http://www.yahoo.com',
        'http://www.scala.org', 'http://cn.bing.com'
        # etc.. 
    ]
    queue = Queue.Queue()
    worker_threads = build_worker_pool(queue, 4)
    start_time = time.time()
    # Add the urls to process
    for url in urls: 
        queue.put(url)  
    # Add the 'quit' message
    for worker in worker_threads:
        queue.put('quit')
    for worker in worker_threads:
        worker.join()
 
    print 'Done! Time taken: {}'.format(time.time() - start_time)
 
def build_worker_pool(queue, size):
    workers = []
    for _ in range(size):
        worker = Consumer(queue)
        worker.start() 
        workers.append(worker)
    return workers
 
if __name__ == '__main__':
    Producer()

Another implementation using a thread pool plus map:

import urllib2 
from multiprocessing.dummy import Pool as ThreadPool 
 
urls = [
    'http://www.python.org', 
    'http://www.python.org/about/',
    'http://www.python.org/doc/',
    'http://www.python.org/download/',
    'http://www.python.org/community/'
    ]
 
# Make the Pool of workers
pool = ThreadPool(4) 
# Open the urls in their own threads
# and return the results
results = pool.map(urllib2.urlopen, urls)
# Close the pool and wait for the work to finish
pool.close() 
pool.join()

