import os
import urllib.request

import requests


def scrape_url(api_url):
    """Download every skin image listed by the paginated API, following next_page_url links."""
    r = requests.get(url=api_url)
    r.raise_for_status()
    data = r.json()  # renamed from `json` to avoid shadowing the stdlib module name

    # skins.tw blocks the default urllib user agent, so install a custom one.
    opener = urllib.request.build_opener()
    opener.addheaders = [('User-agent', 'Skin scraper')]
    urllib.request.install_opener(opener)

    for skin in data['data']:
        url = skin['full_path'].replace(" ", "%20")  # escape spaces so urlretrieve gets a valid URL
        save_path = f"./{skin['name']}.png"
        if os.path.isfile(save_path):
            continue  # already downloaded, skip it
        print(url)
        urllib.request.urlretrieve(url, save_path)

    # Follow pagination until the API reports no further pages.
    if data['next_page_url']:
        scrape_url(data['next_page_url'])


scrape_url('https://api.skins.tw/api/assets/skin/100')