Py3 Basics - Day 9 (continued)
The paramiko module
ssh.py
import paramiko

# Create an SSH client object
ssh = paramiko.SSHClient()
# Allow connecting to hosts that are not in the known_hosts file
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# Connect to the server
ssh.connect(hostname='10.0.0.31', port=52113, username='root', password='123456')
# Run a command
stdin, stdout, stderr = ssh.exec_command('df')
# Get the command result (stdout if there is any output, otherwise stderr)
res, err = stdout.read(), stderr.read()
result = res if res else err
print(result.decode())
# Close the connection
ssh.close()
ssh_ftp.py
import paramiko

transport = paramiko.Transport(('10.0.0.31', 52113))
transport.connect(username='root', password='123456')
sftp = paramiko.SFTPClient.from_transport(transport)
# Upload the local /tmp/location.py to /tmp/test/test.py on the server
sftp.put('/tmp/location.py', '/tmp/test/test.py')
# Download remote_path to the local local_path
sftp.get('remote_path', 'local_path')
transport.close()
SSH keys
RSA - asymmetric key authentication
Public key: placed on the server
Private key: kept on the client
10.0.0.31 (private key) ------> 10.0.0.41 (public key)
File permission bits: rwx (owner)  rwx (group)  rwx (others)
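As a rough illustration of the key pair idea, paramiko itself can generate an RSA key pair (the file path below is made up for illustration; in practice you would normally run ssh-keygen and append the public key to ~/.ssh/authorized_keys on the server):

import paramiko

# Generate a 2048-bit RSA key pair (illustrative only)
key = paramiko.RSAKey.generate(2048)

# The private key file stays on the client (e.g. 10.0.0.31)
key.write_private_key_file('/tmp/id_rsa_demo')

# This public key line would be appended to ~/.ssh/authorized_keys on the server (e.g. 10.0.0.41)
print("ssh-rsa " + key.get_base64())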
ssh rsaa.py
import paramiko

private_key = paramiko.RSAKey.from_private_key_file('ssh rsaa31')
# Create an SSH client object
ssh = paramiko.SSHClient()
# Allow connecting to hosts that are not in the known_hosts file
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# Connect to the server using key-based authentication
ssh.connect(hostname='10.0.0.41', port=52113, username='gongli', pkey=private_key)
# Run a command
stdin, stdout, stderr = ssh.exec_command('df')
# Get the command result
result = stdout.read()
print(result.decode())
# Close the connection
ssh.close()
What is a thread?
A thread is the smallest unit of execution that an operating system can schedule. It lives inside a process and is the actual unit of work within that process: a thread is a single sequential flow of control, a process can run multiple threads concurrently, and each thread carries out a different task.
disk -----RAM------------CPU
Thread: a sequence of instructions.
Process: an application such as QQ must be presented to the operating system as a single unit to be managed; the collection of resources it uses (memory, handles, calls to various resources) managed together is what constitutes a process.
Thread: the smallest unit the operating system can schedule; a collection of instructions.
Process: to use the CPU, a process must first create a thread.
All threads within the same process share the same memory space.
Which is faster, a thread or a process? A trick question: they are not directly comparable.
Which starts faster, a thread or a process? A thread.
Threads share the process's memory space; each process has its own independent memory.
Two threads of the same process can communicate with each other directly; for two processes to communicate, they must go through an intermediary agent.
Creating a new thread is simple; creating a new process requires cloning its parent process once.
A thread can control and operate on other threads in the same process, but a process can only operate on its child processes.
A small sketch of the shared-memory difference is shown below.
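A minimal sketch of the shared-memory point above (the dictionary name is made up for illustration): a thread mutates the same memory as the main thread, while a multiprocessing child only changes its own copy.

import threading
import multiprocessing

data = {'count': 0}   # hypothetical shared structure

def modify():
    data['count'] += 1   # mutate the dictionary

if __name__ == '__main__':
    # A thread shares this process's memory, so the change is visible
    t = threading.Thread(target=modify)
    t.start()
    t.join()
    print("after thread :", data['count'])   # 1

    # A child process works on its own copy, so the parent's dict is unchanged
    p = multiprocessing.Process(target=modify)
    p.start()
    p.join()
    print("after process:", data['count'])   # still 1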
A simple multi-threaded concurrency example:
import threading
import time

def run(n):
    print("task", n)
    time.sleep(2)

t1 = threading.Thread(target=run, args=("t1",))
t2 = threading.Thread(target=run, args=("t2",))
t1.start()
t2.start()
A multi-threading example written as a class:
import threading
import time

class MyThead(threading.Thread):
    def __init__(self, n):
        super(MyThead, self).__init__()
        self.n = n

    def run(self):
        print("running task", self.n)

t1 = MyThead("t1")
t2 = MyThead("t2")
t1.start()
t2.start()
Example 3:
import threading
import time

def run(n):
    print("task", n)
    time.sleep(2)
    print("task done", n)

start_time = time.time()
for i in range(50):
    t = threading.Thread(target=run, args=("t-%s" % i,))
    t.start()

# The main thread does not wait for the worker threads, so these lines print immediately
print("-------all threading has finished...")
print("cost:", time.time() - start_time)
Example 4:
import threading
import time

class MyThead(threading.Thread):
    def __init__(self, n, sleep_time):
        super(MyThead, self).__init__()
        self.n = n
        self.sleep_time = sleep_time

    def run(self):
        print("running task", self.n)
        time.sleep(self.sleep_time)
        print("task done,", self.n)

t1 = MyThead("t1", 2)
t2 = MyThead("t2", 4)
t1.start()
t2.start()
t1.join()   # wait() -- block until t1 finishes; t2 is not joined, so it may still be running
print("main thread....")
Example:
import threading
import time

def run(n):
    print("task", n)
    time.sleep(2)
    print("task done", n)

start_time = time.time()
t_objs = []
for i in range(50):
    t = threading.Thread(target=run, args=("t-%s" % i,))
    t.start()
    t_objs.append(t)

for t in t_objs:
    t.join()

print("-------all threading has finished...")
print("cost:", time.time() - start_time)
Upgraded version:
import threading
import time

def run(n):
    print("task", n, threading.current_thread())
    time.sleep(2)
    print("task done", n)

start_time = time.time()
t_objs = []
for i in range(50):
    t = threading.Thread(target=run, args=("t-%s" % i,))
    t.start()
    t_objs.append(t)

for t in t_objs:
    t.join()

print("-------all threading has finished...", threading.current_thread(), threading.active_count())
print("cost:", time.time() - start_time)
Daemon threads
import threading
import time

def run(n):
    print("task", n)
    time.sleep(2)
    print("task done", n)

start_time = time.time()
t_objs = []   # store the thread instances
for i in range(50):
    t = threading.Thread(target=run, args=("t-%s" % i,))
    t.setDaemon(True)   # mark this thread as a daemon (must be set before start)
    t.start()
    t_objs.append(t)    # collect the threads instead of joining here, so later threads are not blocked from starting

# No join here: the main thread exits as soon as it is done,
# and all daemon threads are killed along with it
print("-------all threading has finished...", threading.current_thread(), threading.active_count())
print("cost:", time.time() - start_time)
No matter how many threads you create, CPython only ever runs one thread at a time.
Python threads call straight into C-level interfaces; the Python interpreter simply waits for the C code to return its result.
The mechanism behind this is the Global Interpreter Lock (GIL).
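A rough sketch of what this means in practice (the timings will vary by machine and are only illustrative): splitting a pure CPU-bound loop across two threads does not make it faster under CPython, because the GIL lets only one thread execute bytecode at a time.

import threading
import time

def count_down(n):
    while n > 0:
        n -= 1   # pure CPU work, no I/O

N = 10_000_000

# All the work in a single thread
start = time.time()
count_down(N * 2)
print("1 thread :", time.time() - start)

# The same total work split across two threads
start = time.time()
t1 = threading.Thread(target=count_down, args=(N,))
t2 = threading.Thread(target=count_down, args=(N,))
t1.start()
t2.start()
t1.join()
t2.join()
print("2 threads:", time.time() - start)   # usually no faster (often slower) because of the GIL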
Lock
import threading
import time

def run(n):
    global num
    num += 1   # every thread increments the shared variable

num = 0
start_time = time.time()
t_objs = []   # store the thread instances
for i in range(50):
    t = threading.Thread(target=run, args=("t-%s" % i,))
    # t.setDaemon(True)   # mark this thread as a daemon
    t.start()
    t_objs.append(t)      # collect the threads instead of joining here, so later threads are not blocked from starting

# for t in t_objs:   # loop over the thread instances and wait for them all to finish
#     t.join()

print("-------all threading has finished...", threading.current_thread(), threading.active_count())
# print("cost:", time.time() - start_time)
print("num:", num)
GIL VS Lock
A sharp student will ask: if Python already has the GIL to guarantee that only one thread executes at any moment, why do we still need a lock here? Note that this lock is a user-level lock and has nothing to do with the GIL. The point is that num += 1 is not a single atomic operation: the GIL can switch threads between reading the old value and writing the new one, so two threads may read the same value and one increment is lost. The diagram shown in class, together with the live explanation, makes this clear.
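A minimal sketch of the non-atomicity point using the dis module (this snippet is illustrative and not part of the original notes):

import dis

num = 0

def add_one():
    global num
    num += 1   # compiles to separate load / add / store instructions

# Prints the bytecode: a global load, a constant load, an in-place add
# (INPLACE_ADD or BINARY_OP depending on the Python version), then STORE_GLOBAL.
# A thread switch between the load and the store loses one update.
dis.dis(add_one)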
Recursive lock (RLock)
import threading, time

def run1():
    print("grab the first part data")
    lock.acquire()
    global num
    num += 1
    lock.release()
    return num

def run2():
    print("grab the second part data")
    lock.acquire()
    global num2
    num2 += 1
    lock.release()
    return num2

def run3():
    lock.acquire()
    res = run1()
    print('--------between run1 and run2-----')
    res2 = run2()
    lock.release()
    print(res, res2)

if __name__ == '__main__':
    num, num2 = 0, 0
    lock = threading.RLock()   # recursive lock: the same thread can acquire it more than once
    for i in range(10):
        t = threading.Thread(target=run3)
        t.start()

while threading.active_count() != 1:
    print(threading.active_count())
else:
    print('----all threads done---')
    print(num, num2)

Thread lock (mutex)
import time
import threading

def addNum():
    global num   # get the shared global variable in each thread
    print('--get num:', num)
    time.sleep(1)
    lock.acquire()   # acquire the lock before modifying the data
    num -= 1         # decrement the shared variable
    lock.release()   # release the lock after modifying

num = 100            # set a shared variable
thread_list = []
lock = threading.Lock()   # create a global lock
for i in range(100):
    t = threading.Thread(target=addNum)
    t.start()
    thread_list.append(t)

for t in thread_list:   # wait for all threads to finish
    t.join()

print('final num:', num)
Semaphore: a BoundedSemaphore lets at most a fixed number of threads into the protected section at the same time.

import threading, time

def run(n):
    semaphore.acquire()
    time.sleep(1)
    print("run the thread: %s\n" % n)
    semaphore.release()

if __name__ == '__main__':
    num = 0
    semaphore = threading.BoundedSemaphore(5)   # allow at most 5 threads to run at once
    for i in range(20):
        t = threading.Thread(target=run, args=(i,))
        t.start()

while threading.active_count() != 1:
    pass   # print(threading.active_count())
else:
    print('----all threads done---')
    print(num)
Event
import threading, time

event = threading.Event()

def lighter():
    count = 0
    while True:
        if count > 20 and count < 30:   # switch to red
            event.clear()               # clear the flag
            print("\033[41;1mred light is on...\033[0m")
        elif count > 30:
            event.set()                 # switch back to green
            count = 0
        else:
            print("\033[42;1mgreen light is on...\033[0m")
        time.sleep(1)
        count += 1

light = threading.Thread(target=lighter)   # pass the function itself, do not call it
light.start()
Traffic light example:
import threading, time

event = threading.Event()

def lighter():
    count = 0
    event.set()   # start with the green light on
    while True:
        if count > 5 and count < 10:   # switch to red
            event.clear()              # clear the flag
            print("\033[41;1mred light is on...\033[0m")
        elif count > 10:
            event.set()                # switch back to green
            count = 0
        else:
            print("\033[42;1mgreen light is on...\033[0m")
        time.sleep(1)
        count += 1

def car(name):
    while True:
        if event.is_set():   # the flag being set means green light
            print("[%s] running...." % name)
            time.sleep(1)
        else:
            print("[%s] sees red light, waiting...." % name)
            event.wait()
            print("\033[34;1m[%s] green light is on, start going...\033[0m" % name)

light = threading.Thread(target=lighter)   # pass the function itself, do not call it
light.start()

car1 = threading.Thread(target=car, args=("Tesla",))
car1.start()
The queue module
queue is especially useful in threaded programming when information must be exchanged safely between multiple threads.
- class queue.Queue(maxsize=0)          # first in, first out (FIFO)
- class queue.LifoQueue(maxsize=0)      # last in, first out (LIFO)
- class queue.PriorityQueue(maxsize=0)  # items can be given a priority when stored
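For completeness, a minimal FIFO sketch with queue.Queue (the examples that follow cover LifoQueue and the priority queue):

import queue

q = queue.Queue()   # FIFO: items come out in the same order they were put in
q.put(1)
q.put(2)
q.put(3)
print(q.get())   # 1
print(q.get())   # 2
print(q.get())   # 3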
import queue

q = queue.LifoQueue()   # LIFO: the last item put in comes out first
q.put(1)
q.put(2)
q.put(3)
print(q.get())
print(q.get())
print(q.get())
Sorting by priority:
import queue

q = queue.PriorityQueue()   # items come out ordered by priority (lowest value first)
q.put((10, "alex"))
q.put((-1, "chengronghua"))
q.put((3, "hanyang"))
q.put((6, "wangseng"))
print(q.get())
print(q.get())
print(q.get())
print(q.get())
The producer-consumer model
Using the producer-consumer pattern in concurrent programming solves the vast majority of concurrency problems. The pattern raises a program's overall data-processing speed by balancing the working capacity of the producing threads and the consuming threads.
Why use the producer-consumer pattern
In the world of threads, a producer is a thread that produces data and a consumer is a thread that consumes data. In multithreaded development, if the producer is fast while the consumer is slow, the producer has to wait for the consumer to catch up before it can produce more data. By the same reasoning, if the consumer's capacity exceeds the producer's, the consumer has to wait for the producer. The producer-consumer pattern was introduced to solve this problem.
What is the producer-consumer pattern
The producer-consumer pattern uses a container to break the tight coupling between producer and consumer. The two do not talk to each other directly; they communicate through a blocking queue. After producing a piece of data, the producer does not wait for the consumer to handle it, it simply drops the data into the blocking queue; the consumer does not ask the producer for data, it takes it straight from the blocking queue. The blocking queue acts as a buffer that balances the processing capacity of producer and consumer.
import threading, time
import queue

q = queue.Queue(maxsize=10)

def Producer(name):
    count = 1
    while True:
        q.put("bone %s" % count)
        print("produced bone", count)
        count += 1
        time.sleep(1)

def Consumer(name):
    # while q.qsize() > 0:
    while True:
        print("[%s] took [%s] and ate it...." % (name, q.get()))
        time.sleep(1)

p = threading.Thread(target=Producer, args=("Alex",))
c = threading.Thread(target=Consumer, args=("ChengRonghua",))
c1 = threading.Thread(target=Consumer, args=("Wangseng",))
p.start()
c.start()
c1.start()