1. Basic understanding

1. Send a GET request

import requests

if __name__ == "__main__":
    # Send a GET request
    response = requests.get('http://httpbin.org/get')

2. Common attributes of the response object

import requests

if __name__ == "__main__":
    # Send a GET request
    response = requests.get('http://httpbin.org/get')

    # Set the encoding for the crawled site
    response.encoding = 'utf-8'

    # Print the returned data
    print(response.text)
    print(response.json())
    print(response.headers)
    print(response.status_code)
    print(response.url)
    print(response.cookies)

    # Retrieve the raw bytes, with no decoding (useful when response.text
    # comes out garbled, or when downloading binary files)
    print(response.content)

3. Other requests

# The other HTTP verbs follow the same calling convention as requests.get
response = requests.post('http://httpbin.org/post')
response = requests.put('http://httpbin.org/put')
response = requests.delete('http://httpbin.org/delete')
response = requests.head('http://httpbin.org/get')
response = requests.options('http://httpbin.org/get')

Get request parameters

1. Concatenate parameters directly after the URL address

import requests

if __name__ == "__main__":
    # Define a request header (mimic a browser)
    headers = {
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/65.0.3325.162 Safari/537.36',
    }

    # Set the parameters
    data = {'name': 'june', 'password': 123456}

    # Send a GET request with the parameters concatenated onto the URL
    response = requests.get(
        'http://httpbin.org/get?name=june&password=123456',
        headers=headers,
    )

    # Set the encoding for the crawled site
    response.encoding = 'utf-8'
    print(response.text)

2. Use params to pass parameters

import requests

if __name__ == "__main__":
    # Define a request header (mimic a browser)
    headers = {
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/65.0.3325.162 Safari/537.36',
    }

    # Set the parameters
    data = {'name': 'june', 'password': 123456}

    # Send a GET request; requests encodes `params` into the query string
    response = requests.get('http://httpbin.org/get', headers=headers,
                            params=data)

    # Set the encoding for the crawled site
    response.encoding = 'utf-8'
    print(response.text)

Download the content using the Requests library and regular expressions

1. Goal: download the article titles from the Jobbole guide page

2. Write logical code

import re

import requests

if __name__ == "__main__":
    headers = {
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/65.0.3325.162 Safari/537.36',
    }
    url = 'http://python.jobbole.com/category/guide/'
    response = requests.get(url=url, headers=headers)

    # Non-greedy capture of the title attribute inside the post-thumb div;
    # re.S lets '.' also match newlines, so the pattern can span lines.
    pattern = re.compile(
        '<div.*?post-thumb.*?title="(.*?)".*?</a>', re.S
    )
    print(response.status_code)
    result_list = re.findall(pattern, response.text)

    # Append the titles one per line; `with` guarantees the file is closed
    # even if a write fails.
    with open('jobbole1.txt', 'a+', encoding='utf8') as f:
        for item in result_list:
            f.write(item.strip() + '\n')

3. Explain regular expressions

.*? matches any character, non-greedily

re.S enables '.' to match all characters, including newlines

Download images using the Requests library and regular expressions

1. Import the packages

import re

import os

import shutil

import requests

2. Define a class for downloading images

class DownPic(object):
    """Download the post-thumbnail images from the Jobbole guide page
    into a local 'demo' directory."""

    def __init__(self):
        self.url = 'http://python.jobbole.com/category/guide/'
        self.headers = {
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) '
                          'AppleWebKit/537.36 (KHTML, like Gecko) '
                          'Chrome/65.0.3325.162 Safari/537.36',
        }
        self.create_dir()

    def create_dir(self):
        # Delete the folder if it already exists, then recreate it empty,
        # so every run starts from a clean download directory.
        if os.path.exists('demo'):
            shutil.rmtree('demo')
        os.makedirs('demo')

    def get_html(self):
        # Fetch the listing page and return its HTML text.
        response = requests.get(url=self.url, headers=self.headers)
        return response.text

    def pattern(self):
        # Non-greedy capture of the src attribute inside the post-thumb div;
        # re.S lets '.' also match newlines.
        pattern = re.compile(
            '<div.*?post-thumb.*?src="(.*?)".*?</a>', re.S
        )
        result_list = re.findall(pattern, self.get_html())
        return result_list

    def download(self):
        for item in self.pattern():
            # Request each image URL again; skip anything that is not
            # a png/jpg file.
            if item.rsplit('.')[-1] not in ('png', 'jpg'):
                continue
            resp = requests.get(item.strip())
            try:
                # File name is the last path segment of the image URL.
                name = item.strip().rsplit('/')[-1]
                with open(os.path.join('demo', name), 'wb') as f:
                    f.write(resp.content)
            except Exception as e:
                # Best-effort download: report the failure and keep going.
                print(e)

3. Call the class

if __name__ == "__main__":
    # Creating the instance also (re)creates the 'demo' directory
    p = DownPic()
    p.download()

Post requests for the Requests library

1. The general format

# General form of a POST: `data` is sent as the form-encoded request body
response = requests.post('http://httpbin.org/post', headers=headers, data=data)

2. Send data to the server

import requests

if __name__ == "__main__":
    # Define a request header (mimic a browser)
    headers = {
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/65.0.3325.162 Safari/537.36',
    }

    # Set the parameters
    data = {'email': '[email protected]', 'password': 123456}

    # Send a POST request; `data` goes in the form-encoded body
    response = requests.post('https://httpbin.org/post', headers=headers,
                             data=data)

    # Set the encoding for the crawled site
    response.encoding = 'utf-8'
    print(response.text)

6. Use post request to get job information

import requests

if __name__ == "__main__":
    url = 'https://www.lagou.com/jobs/positionAjax.json?needAddtionalResult=false'
    # The Referer header matters here: the site checks it and rejects
    # requests that do not look like they came from the search page.
    headers = {
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/65.0.3325.162 Safari/537.36',
        'Referer': 'https://www.lagou.com/jobs/list_python?labelWords=&fromSearch=true&suginput=',
    }
    # Form data expected by the positionAjax endpoint:
    # first page, keyword 'python'
    data = {
        'first': 'true',
        'pn': '1',
        'kd': 'python',
    }
    response = requests.post(url=url, headers=headers, data=data)
    print(response.json())