You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

251 lines
20 KiB
Plaintext

{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%load_ext autoreload\n",
"%autoreload 2\n",
"\n",
"import os\n",
"import wget\n",
"import zipfile\n",
"import numpy as np\n",
"import pandas as pd\n",
"import networkx as nx\n",
"import plotly.graph_objects as go\n",
"# from utils import *\n",
"from collections import Counter\n",
"from tqdm import tqdm\n",
"import time\n",
"import geopandas as gpd\n",
"import gdown # for downloading files from google drive\n",
"\n",
"# ignore warnings\n",
"import warnings\n",
"import sys\n",
"warnings.filterwarnings(\"ignore\")"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"def download_dataTMPsets():\n",
"    \"\"\"Download the Brightkite, Gowalla and Foursquare check-in datasets.\n",
"\n",
"    Creates a dataTMP/ folder with one sub-folder per dataset, fetches the raw\n",
"    archives (SNAP files via wget; the Foursquare archive via gdown, since it\n",
"    is hosted on Google Drive) and decompresses them. Files that are already\n",
"    present are skipped, so the function is safe to re-run.\n",
"    \"\"\"\n",
"    # NOTE: every value must be a *list* of URLs. A bare string would be\n",
"    # iterated character by character below, so gdown would be called with\n",
"    # the URL 'h' and raise requests.exceptions.MissingSchema.\n",
"    urls = {\n",
"        \"brightkite\": [\"https://snap.stanford.edu/data/loc-brightkite_edges.txt.gz\", \"https://snap.stanford.edu/data/loc-brightkite_totalCheckins.txt.gz\"],\n",
"        \"gowalla\": [\"https://snap.stanford.edu/data/loc-gowalla_edges.txt.gz\", \"https://snap.stanford.edu/data/loc-gowalla_totalCheckins.txt.gz\"],\n",
"        \"foursquare\": [\"https://drive.google.com/file/d/1PNk3zY8NjLcDiAbzjABzY5FiPAFHq6T8/view?usp=sharing\"]}\n",
"\n",
"    if not os.path.exists(\"dataTMP\"):\n",
"        os.mkdir(\"dataTMP\")\n",
"        print(\"Created dataTMP folder\")\n",
"\n",
"    # One sub-folder per dataset.\n",
"    for folder in urls:\n",
"        if not os.path.exists(os.path.join(\"dataTMP\", folder)):\n",
"            os.mkdir(os.path.join(\"dataTMP\", folder))\n",
"            print(\"Created {} folder\".format(folder))\n",
"\n",
"    for folder, folder_urls in urls.items():\n",
"        for url in folder_urls:\n",
"            if folder == \"foursquare\":\n",
"                # Google Drive share link: needs gdown and an explicit output\n",
"                # name (the URL tail 'view?usp=sharing' is not a filename).\n",
"                output = os.path.join(\"dataTMP\", folder, \"foursquare_full.zip\")\n",
"                if not os.path.exists(output):\n",
"                    gdown.download(url, output, quiet=False, fuzzy=True)\n",
"                else :\n",
"                    print(\"{} already downloaded\".format(url))\n",
"            else:\n",
"                dest = os.path.join(\"dataTMP\", folder, url.split(\"/\")[-1])\n",
"                # Check both the archive and its gunzipped name: gunzip below\n",
"                # removes the .gz, so a previous run must not re-download.\n",
"                if not os.path.exists(dest) and not os.path.exists(dest[:-3]):\n",
"                    print(\"Downloading {}...\".format(url))\n",
"                    wget.download(url, os.path.join(\"dataTMP\", folder))\n",
"                else :\n",
"                    print(\"{} already downloaded\".format(url))\n",
"\n",
"    # Decompress whatever was downloaded.\n",
"    for folder in urls:\n",
"        for file in os.listdir(os.path.join(\"dataTMP\", folder)):\n",
"            if file.endswith(\".gz\"):\n",
"                print(\"Unzipping {}...\".format(file))\n",
"                os.system(\"gunzip {}\".format(os.path.join(\"dataTMP\", folder, file)))\n",
"            elif file.endswith(\".zip\"):\n",
"                print(\"Unzipping {}...\".format(file))\n",
"                os.system(\"unzip {}\".format(os.path.join(\"dataTMP\", folder, file)))"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Created dataTMP folder\n",
"Created brightkite folder\n",
"Created gowalla folder\n",
"Created foursquare folder\n",
"Downloading https://snap.stanford.edu/data/loc-brightkite_edges.txt.gz...\n",
"Downloading https://snap.stanford.edu/data/loc-brightkite_totalCheckins.txt.gz...\n",
"Downloading https://snap.stanford.edu/data/loc-gowalla_edges.txt.gz...\n",
"Downloading https://snap.stanford.edu/data/loc-gowalla_totalCheckins.txt.gz...\n"
]
},
{
"ename": "MissingSchema",
"evalue": "Invalid URL 'h': No scheme supplied. Perhaps you meant http://h?",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mMissingSchema\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[9], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m download_dataTMPsets()\n",
"Cell \u001b[0;32mIn[8], line 22\u001b[0m, in \u001b[0;36mdownload_dataTMPsets\u001b[0;34m()\u001b[0m\n\u001b[1;32m 20\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mnot\u001b[39;00m os\u001b[39m.\u001b[39mpath\u001b[39m.\u001b[39mexists(os\u001b[39m.\u001b[39mpath\u001b[39m.\u001b[39mjoin(\u001b[39m\"\u001b[39m\u001b[39mdataTMP\u001b[39m\u001b[39m\"\u001b[39m, folder, \u001b[39m\"\u001b[39m\u001b[39mfoursquare_full.zip\u001b[39m\u001b[39m\"\u001b[39m)):\n\u001b[1;32m 21\u001b[0m output \u001b[39m=\u001b[39m os\u001b[39m.\u001b[39mpath\u001b[39m.\u001b[39mjoin(\u001b[39m\"\u001b[39m\u001b[39mdataTMP\u001b[39m\u001b[39m\"\u001b[39m, folder, \u001b[39m\"\u001b[39m\u001b[39mfoursquare_full.zip\u001b[39m\u001b[39m\"\u001b[39m)\n\u001b[0;32m---> 22\u001b[0m gdown\u001b[39m.\u001b[39;49mdownload(url, output, quiet\u001b[39m=\u001b[39;49m\u001b[39mFalse\u001b[39;49;00m, fuzzy\u001b[39m=\u001b[39;49m\u001b[39mTrue\u001b[39;49;00m)\n\u001b[1;32m 23\u001b[0m \u001b[39melse\u001b[39;00m :\n\u001b[1;32m 24\u001b[0m \u001b[39mprint\u001b[39m(\u001b[39m\"\u001b[39m\u001b[39m{}\u001b[39;00m\u001b[39m already downloaded\u001b[39m\u001b[39m\"\u001b[39m\u001b[39m.\u001b[39mformat(url))\n",
"File \u001b[0;32m/usr/lib/python3.10/site-packages/gdown/download.py:158\u001b[0m, in \u001b[0;36mdownload\u001b[0;34m(url, output, quiet, proxy, speed, use_cookies, verify, id, fuzzy, resume)\u001b[0m\n\u001b[1;32m 156\u001b[0m \u001b[39mwhile\u001b[39;00m \u001b[39mTrue\u001b[39;00m:\n\u001b[1;32m 157\u001b[0m \u001b[39mtry\u001b[39;00m:\n\u001b[0;32m--> 158\u001b[0m res \u001b[39m=\u001b[39m sess\u001b[39m.\u001b[39;49mget(url, headers\u001b[39m=\u001b[39;49mheaders, stream\u001b[39m=\u001b[39;49m\u001b[39mTrue\u001b[39;49;00m, verify\u001b[39m=\u001b[39;49mverify)\n\u001b[1;32m 159\u001b[0m \u001b[39mexcept\u001b[39;00m requests\u001b[39m.\u001b[39mexceptions\u001b[39m.\u001b[39mProxyError \u001b[39mas\u001b[39;00m e:\n\u001b[1;32m 160\u001b[0m \u001b[39mprint\u001b[39m(\u001b[39m\"\u001b[39m\u001b[39mAn error has occurred using proxy:\u001b[39m\u001b[39m\"\u001b[39m, proxy, file\u001b[39m=\u001b[39msys\u001b[39m.\u001b[39mstderr)\n",
"File \u001b[0;32m/usr/lib/python3.10/site-packages/requests/sessions.py:600\u001b[0m, in \u001b[0;36mSession.get\u001b[0;34m(self, url, **kwargs)\u001b[0m\n\u001b[1;32m 592\u001b[0m \u001b[39m\u001b[39m\u001b[39mr\u001b[39m\u001b[39m\"\"\"Sends a GET request. Returns :class:`Response` object.\u001b[39;00m\n\u001b[1;32m 593\u001b[0m \n\u001b[1;32m 594\u001b[0m \u001b[39m:param url: URL for the new :class:`Request` object.\u001b[39;00m\n\u001b[1;32m 595\u001b[0m \u001b[39m:param \\*\\*kwargs: Optional arguments that ``request`` takes.\u001b[39;00m\n\u001b[1;32m 596\u001b[0m \u001b[39m:rtype: requests.Response\u001b[39;00m\n\u001b[1;32m 597\u001b[0m \u001b[39m\"\"\"\u001b[39;00m\n\u001b[1;32m 599\u001b[0m kwargs\u001b[39m.\u001b[39msetdefault(\u001b[39m\"\u001b[39m\u001b[39mallow_redirects\u001b[39m\u001b[39m\"\u001b[39m, \u001b[39mTrue\u001b[39;00m)\n\u001b[0;32m--> 600\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mrequest(\u001b[39m\"\u001b[39;49m\u001b[39mGET\u001b[39;49m\u001b[39m\"\u001b[39;49m, url, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs)\n",
"File \u001b[0;32m/usr/lib/python3.10/site-packages/requests/sessions.py:573\u001b[0m, in \u001b[0;36mSession.request\u001b[0;34m(self, method, url, params, data, headers, cookies, files, auth, timeout, allow_redirects, proxies, hooks, stream, verify, cert, json)\u001b[0m\n\u001b[1;32m 560\u001b[0m \u001b[39m# Create the Request.\u001b[39;00m\n\u001b[1;32m 561\u001b[0m req \u001b[39m=\u001b[39m Request(\n\u001b[1;32m 562\u001b[0m method\u001b[39m=\u001b[39mmethod\u001b[39m.\u001b[39mupper(),\n\u001b[1;32m 563\u001b[0m url\u001b[39m=\u001b[39murl,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 571\u001b[0m hooks\u001b[39m=\u001b[39mhooks,\n\u001b[1;32m 572\u001b[0m )\n\u001b[0;32m--> 573\u001b[0m prep \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mprepare_request(req)\n\u001b[1;32m 575\u001b[0m proxies \u001b[39m=\u001b[39m proxies \u001b[39mor\u001b[39;00m {}\n\u001b[1;32m 577\u001b[0m settings \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mmerge_environment_settings(\n\u001b[1;32m 578\u001b[0m prep\u001b[39m.\u001b[39murl, proxies, stream, verify, cert\n\u001b[1;32m 579\u001b[0m )\n",
"File \u001b[0;32m/usr/lib/python3.10/site-packages/requests/sessions.py:484\u001b[0m, in \u001b[0;36mSession.prepare_request\u001b[0;34m(self, request)\u001b[0m\n\u001b[1;32m 481\u001b[0m auth \u001b[39m=\u001b[39m get_netrc_auth(request\u001b[39m.\u001b[39murl)\n\u001b[1;32m 483\u001b[0m p \u001b[39m=\u001b[39m PreparedRequest()\n\u001b[0;32m--> 484\u001b[0m p\u001b[39m.\u001b[39;49mprepare(\n\u001b[1;32m 485\u001b[0m method\u001b[39m=\u001b[39;49mrequest\u001b[39m.\u001b[39;49mmethod\u001b[39m.\u001b[39;49mupper(),\n\u001b[1;32m 486\u001b[0m url\u001b[39m=\u001b[39;49mrequest\u001b[39m.\u001b[39;49murl,\n\u001b[1;32m 487\u001b[0m files\u001b[39m=\u001b[39;49mrequest\u001b[39m.\u001b[39;49mfiles,\n\u001b[1;32m 488\u001b[0m data\u001b[39m=\u001b[39;49mrequest\u001b[39m.\u001b[39;49mdata,\n\u001b[1;32m 489\u001b[0m json\u001b[39m=\u001b[39;49mrequest\u001b[39m.\u001b[39;49mjson,\n\u001b[1;32m 490\u001b[0m headers\u001b[39m=\u001b[39;49mmerge_setting(\n\u001b[1;32m 491\u001b[0m request\u001b[39m.\u001b[39;49mheaders, \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mheaders, dict_class\u001b[39m=\u001b[39;49mCaseInsensitiveDict\n\u001b[1;32m 492\u001b[0m ),\n\u001b[1;32m 493\u001b[0m params\u001b[39m=\u001b[39;49mmerge_setting(request\u001b[39m.\u001b[39;49mparams, \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mparams),\n\u001b[1;32m 494\u001b[0m auth\u001b[39m=\u001b[39;49mmerge_setting(auth, \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mauth),\n\u001b[1;32m 495\u001b[0m cookies\u001b[39m=\u001b[39;49mmerged_cookies,\n\u001b[1;32m 496\u001b[0m hooks\u001b[39m=\u001b[39;49mmerge_hooks(request\u001b[39m.\u001b[39;49mhooks, \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mhooks),\n\u001b[1;32m 497\u001b[0m )\n\u001b[1;32m 498\u001b[0m \u001b[39mreturn\u001b[39;00m p\n",
"File \u001b[0;32m/usr/lib/python3.10/site-packages/requests/models.py:368\u001b[0m, in \u001b[0;36mPreparedRequest.prepare\u001b[0;34m(self, method, url, headers, files, data, params, auth, cookies, hooks, json)\u001b[0m\n\u001b[1;32m 365\u001b[0m \u001b[39m\u001b[39m\u001b[39m\"\"\"Prepares the entire request with the given parameters.\"\"\"\u001b[39;00m\n\u001b[1;32m 367\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mprepare_method(method)\n\u001b[0;32m--> 368\u001b[0m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mprepare_url(url, params)\n\u001b[1;32m 369\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mprepare_headers(headers)\n\u001b[1;32m 370\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mprepare_cookies(cookies)\n",
"File \u001b[0;32m/usr/lib/python3.10/site-packages/requests/models.py:439\u001b[0m, in \u001b[0;36mPreparedRequest.prepare_url\u001b[0;34m(self, url, params)\u001b[0m\n\u001b[1;32m 436\u001b[0m \u001b[39mraise\u001b[39;00m InvalidURL(\u001b[39m*\u001b[39me\u001b[39m.\u001b[39margs)\n\u001b[1;32m 438\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mnot\u001b[39;00m scheme:\n\u001b[0;32m--> 439\u001b[0m \u001b[39mraise\u001b[39;00m MissingSchema(\n\u001b[1;32m 440\u001b[0m \u001b[39mf\u001b[39m\u001b[39m\"\u001b[39m\u001b[39mInvalid URL \u001b[39m\u001b[39m{\u001b[39;00murl\u001b[39m!r}\u001b[39;00m\u001b[39m: No scheme supplied. \u001b[39m\u001b[39m\"\u001b[39m\n\u001b[1;32m 441\u001b[0m \u001b[39mf\u001b[39m\u001b[39m\"\u001b[39m\u001b[39mPerhaps you meant http://\u001b[39m\u001b[39m{\u001b[39;00murl\u001b[39m}\u001b[39;00m\u001b[39m?\u001b[39m\u001b[39m\"\u001b[39m\n\u001b[1;32m 442\u001b[0m )\n\u001b[1;32m 444\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mnot\u001b[39;00m host:\n\u001b[1;32m 445\u001b[0m \u001b[39mraise\u001b[39;00m InvalidURL(\u001b[39mf\u001b[39m\u001b[39m\"\u001b[39m\u001b[39mInvalid URL \u001b[39m\u001b[39m{\u001b[39;00murl\u001b[39m!r}\u001b[39;00m\u001b[39m: No host supplied\u001b[39m\u001b[39m\"\u001b[39m)\n",
"\u001b[0;31mMissingSchema\u001b[0m: Invalid URL 'h': No scheme supplied. Perhaps you meant http://h?"
]
}
],
"source": [
"download_dataTMPsets()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"\n",
"def download_dataTMPsets():\n",
"    \"\"\"Download the Brightkite, Gowalla and Foursquare datasets into dataTMP/.\n",
"\n",
"    Skips any file that is already present on disk, so re-running the cell\n",
"    is cheap and idempotent.\n",
"    \"\"\"\n",
"    urls = [\n",
"        [\"https://snap.stanford.edu/dataTMP/loc-brightkite_edges.txt.gz\", \"https://snap.stanford.edu/dataTMP/loc-brightkite_totalCheckins.txt.gz\"],\n",
"        [\"https://snap.stanford.edu/dataTMP/loc-gowalla_edges.txt.gz\", \"https://snap.stanford.edu/dataTMP/loc-gowalla_totalCheckins.txt.gz\"],\n",
"        [\"https://drive.google.com/file/d/1PNk3zY8NjLcDiAbzjABzY5FiPAFHq6T8/view?usp=sharing\"]\n",
"    ]\n",
"\n",
"    folders = [\"brightkite\", \"gowalla\", \"foursquare\"]\n",
"\n",
"    if not os.path.exists(\"dataTMP\"):\n",
"        os.mkdir(\"dataTMP\")\n",
"\n",
"    for folder in folders:\n",
"        if not os.path.exists(os.path.join(\"dataTMP\", folder)):\n",
"            os.mkdir(os.path.join(\"dataTMP\", folder))\n",
"\n",
"    # Download every url into its folder. The Foursquare link is a Google\n",
"    # Drive share, so it needs gdown and an explicit output filename: the URL\n",
"    # tail 'view?usp=sharing' is not a usable filename, and checking for it\n",
"    # on disk would force a re-download on every run. Dispatch on the folder\n",
"    # name rather than a magic index into the list.\n",
"    for folder, folder_urls in zip(folders, urls):\n",
"        for url in folder_urls:\n",
"            if folder == \"foursquare\":\n",
"                output = os.path.join(\"dataTMP\", folder, \"foursquare_full.zip\")\n",
"                if not os.path.exists(output):\n",
"                    gdown.download(url, output, quiet=False, fuzzy=True)\n",
"            else:\n",
"                if not os.path.exists(os.path.join(\"dataTMP\", folder, url.split(\"/\")[-1])):\n",
"                    wget.download(url, os.path.join(\"dataTMP\", folder))\n",
"\n",
"download_dataTMPsets()\n",
" # # unzip all the files in the 3 folders. Then remove the .gz or .zip files\n",
"\n",
" # for folder in folders:\n",
" # for file in os.listdir(os.path.join(\"dataTMP\", folder)):\n",
" # print(folder, file)\n",
" # if file.endswith(\".gz\"):\n",
" # os.system(\"gunzip {}\".format(os.path.join(\"dataTMP\", folder, file)))\n",
" # elif file.endswith(\".zip\"):\n",
" # os.system(\"unzip {}\".format(os.path.join(\"dataTMP\", folder, file)))\n",
" # os.remove(os.path.join(\"dataTMP\", folder, file))\n",
"\n",
" # # take all the .txt files from dataTMP/foursquare/dataTMPset_WWW2019 and move them to dataTMP/foursquare\n",
"\n",
" # for file in os.listdir(os.path.join(\"dataTMP\", \"foursquare\", \"dataTMPset_WWW2019\")):\n",
" # if file.endswith(\".txt\"):\n",
" # os.rename(os.path.join(\"dataTMP\", \"foursquare\", \"dataTMPset_WWW2019\", file), os.path.join(\"dataTMP\", \"foursquare\", file))\n",
"\n",
" # # remove the dataTMPset_WWW2019 folder, note that is not empty\n",
" # # os.rmdir(os.path.join(\"dataTMP\", \"foursquare\", \"dataTMPset_WWW2019\"))\n",
"\n",
" # for file in [\"dataTMPset_WWW_friendship_old.txt\", \"dataTMPset_WWW_readme.txt\", \"raw_Checkins_anonymized.txt\", \"raw_POIs.txt\"]:\n",
" # os.remove(os.path.join(\"dataTMP\", \"foursquare\", file))\n",
"\n",
" # # Now we want to clean our dataTMP and rename the files.\n",
"\n",
" # for file in os.listdir(os.path.join(\"dataTMP\", \"brightkite\")):\n",
" # if file.endswith(\"_edges.txt\"):\n",
" # os.rename(os.path.join(\"dataTMP\", \"brightkite\", file), os.path.join(\"dataTMP\", \"brightkite\", \"brightkite_friends_edges.txt\"))\n",
"\n",
" # for file in os.listdir(os.path.join(\"dataTMP\", \"gowalla\")):\n",
" # if file.endswith(\"_edges.txt\"):\n",
" # os.rename(os.path.join(\"dataTMP\", \"gowalla\", file), os.path.join(\"dataTMP\", \"gowalla\", \"gowalla_friends_edges.txt\"))\n",
"\n",
" # for file in os.listdir(os.path.join(\"dataTMP\", \"foursquare\")):\n",
" # if file.endswith(\"dataTMPset_WWW_friendship_new.txt\"):\n",
" # os.rename(os.path.join(\"dataTMP\", \"foursquare\", file), os.path.join(\"dataTMP\", \"foursquare\", \"foursquare_friends_edges.txt\"))\n",
"\n",
" # # Now we from the _totalCheckins.txt files we want to keep only the first and last column, which are the user ID and the venue ID. We also want to remove the header of the file.\n",
"\n",
" # for file in os.listdir(os.path.join(\"dataTMP\", \"brightkite\")):\n",
" # if file.endswith(\"_totalCheckins.txt\"):\n",
" # df = pd.read_csv(os.path.join(\"dataTMP\", \"brightkite\", file), sep=\"\\t\", header=None, names=[\"user_id\", \"check-in time\", \"latitude\", \"longitude\", \"venue_id\"])\n",
" # df[\"check-in time\"] = pd.to_datetime(df[\"check-in time\"])\n",
" # df = df[df[\"check-in time\"].dt.year == 2010]\n",
" # df = df.drop([\"check-in time\", \"latitude\", \"longitude\"], axis=1)\n",
" # df.to_csv(os.path.join(\"dataTMP\", \"brightkite\", \"brightkite_checkins.txt\"), sep=\"\\t\", header=False, index=False, errors=\"ignore\", encoding=\"utf-8\")\n",
" # os.remove(os.path.join(\"dataTMP\", \"brightkite\", file))\n",
"\n",
" # for file in os.listdir(os.path.join(\"dataTMP\", \"gowalla\")):\n",
" # if file.endswith(\"_totalCheckins.txt\"):\n",
" # df = pd.read_csv(os.path.join(\"dataTMP\", \"gowalla\", file), sep=\"\\t\", header=None, names=[\"user_id\", \"check-in time\", \"latitude\", \"longitude\", \"venue_id\"])\n",
" # df[\"check-in time\"] = pd.to_datetime(df[\"check-in time\"])\n",
" # df = df[df[\"check-in time\"].dt.year == 2010]\n",
" # df = df.drop([\"check-in time\", \"latitude\", \"longitude\"], axis=1)\n",
" # df.to_csv(os.path.join(\"dataTMP\", \"gowalla\", \"gowalla_checkins.txt\"), sep=\"\\t\", header=False, index=False, errors=\"ignore\", encoding=\"utf-8\")\n",
" # os.remove(os.path.join(\"dataTMP\", \"gowalla\", file))\n",
"\n",
" # for file in os.listdir(os.path.join(\"dataTMP\", \"foursquare\")):\n",
" # if file.endswith(\"dataTMPset_WWW_Checkins_anonymized.txt\"):\n",
" # df = pd.read_csv(os.path.join(\"dataTMP\", \"foursquare\", file), sep=\"\\t\", header=None)\n",
" # df = df[[0, 1]]\n",
" # df.to_csv(os.path.join(\"dataTMP\", \"foursquare\", \"foursquare_checkins.txt\"), sep=\"\\t\", header=False, index=False, errors=\"ignore\", encoding=\"utf-8\")\n",
" # os.remove(os.path.join(\"dataTMP\", \"foursquare\", file))\n"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.10.8 64-bit",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.9"
},
"orig_nbformat": 4,
"vscode": {
"interpreter": {
"hash": "e7370f93d1d0cde622a1f8e1c04877d8463912d04d973331ad4851f04de6915a"
}
}
},
"nbformat": 4,
"nbformat_minor": 2
}