1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
# set GOOG_KEY env variable to a valid api key
# all rounds available at https://sites.google.com/view/3cb-metashape/pairings-results/past-results?authuser=0
import requests, os, re, io, sqlite3, titlecase, string, openpyxl, json, sys
from datetime import datetime, timedelta
from Levenshtein import distance
from bs4 import BeautifulSoup
# Shared SQLite connection used by every helper in this module.
con = sqlite3.connect("3cb.db")

def getall(query):
    """Execute *query* on the shared connection and return column 0 of every row."""
    fetched = con.cursor().execute(query).fetchall()
    return [record[0] for record in fetched]

# Canonical card names; clean_card() checks player input against this list.
allcards = getall("select name from mtg")
def run_rounds():
    """Scrape the published round spreadsheets and save any round not yet in the db."""
    already_saved = getall("select fileid from round")
    for idx, fileid in enumerate(get_round_fileids(), start=1):
        if fileid in already_saved:
            continue
        # One linked spreadsheet is not a round at all; skip it (it still
        # consumes an index so round numbering stays stable).
        if fileid == "1LGHvTrQz2zhBjz1PhlW61ZYmwRWJyWkEHj-png7ZKdw": # misc file
            continue
        print(f"analyzing round {idx}...")
        save_round(idx, fileid)
# scrape site to list rounds
def get_round_fileids():
    """Return every Google Sheet file id linked from the past-results and current pages."""
    past = requests.get("https://sites.google.com/view/3cb-metashape/pairings-results/past-results")
    pattern = re.compile("spreadsheets/d/(.*?)/")
    current = requests.get("https://sites.google.com/view/3cb-metashape/pairings-results")
    return pattern.findall(past.text + current.text)
def file_to_wb(fileid):
    """Export a Google Drive spreadsheet as .xlsx and return it as an openpyxl workbook.

    Requires the GOOG_KEY env variable to hold a valid API key.  On any
    non-200 response the error body is printed and the process exits.
    """
    params = {"mimeType": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"}
    headers = {"x-goog-api-key": os.environ.get("GOOG_KEY")}
    # (removed unused local `out = []` from the original)
    res = requests.get(f"https://www.googleapis.com/drive/v3/files/{fileid}/export",
                       params=params,
                       headers=headers)
    if res.status_code != 200:
        print(res.text)
        sys.exit(1)
    # data_only=True: read computed cell values, not formulas.
    return openpyxl.load_workbook(io.BytesIO(res.content), data_only=True)
def get_file_created_date(fileid):
    """Return the Drive file's creation date as 'YYYY-MM-DD', or None on failure."""
    res = requests.get(
        f"https://www.googleapis.com/drive/v3/files/{fileid}",
        params={"fields": "createdTime"},
        headers={"x-goog-api-key": os.environ.get("GOOG_KEY")},
    )
    if res.status_code != 200:
        print(res.text)
        return None
    data = res.json()
    if "createdTime" not in data:
        return None
    # Drive returns RFC 3339 with a trailing 'Z'; give fromisoformat an explicit offset.
    created = datetime.fromisoformat(data["createdTime"].replace('Z', '+00:00'))
    return created.strftime('%Y-%m-%d')
def update_bans():
    """Rebuild the `ban` table from the "banlist" sheet of the banlist spreadsheet."""
    sheet_id = "1NZuROOCctbq4p4-CAHE-jOC0675QQjuqVnUqdXFCVD8"
    wb = file_to_wb(sheet_id)
    banlist_sheet = wb["banlist"]
    # Collect every non-empty cell in column A.
    # NOTE(review): the original comment claimed the header row was skipped,
    # but min_row=1 includes row 1 — if the sheet has a header it is stored
    # as a "ban" too; confirm against the live sheet before changing to min_row=2.
    cards = []
    for row in banlist_sheet.iter_rows(min_row=1, min_col=1, max_col=1):
        card_name = row[0].value
        if card_name:
            cards.append(card_name)
    cur = con.cursor()
    cur.execute("drop table if exists ban")
    cur.execute("create table ban (name text)")
    # (original re-checked `if card:` here; cards only contains truthy values)
    for card in cards:
        cur.execute("insert into ban values (?)", (clean_card(card),))
    con.commit()
def load_schema():
    """Create the database tables by running schema.sql against the shared connection."""
    with open('schema.sql', 'r') as schema_file:
        con.executescript(schema_file.read())
def save_sheet(cur, sheet, n, final):
    """Parse one pairings worksheet and insert its decks and match results.

    cur    -- sqlite cursor used for inserts (committing is the caller's job)
    sheet  -- openpyxl worksheet containing one or more group blocks
    n      -- round number recorded with every inserted row
    final  -- True for the finals sheet: group is forced to "final" and deck
              rows are not inserted (they were already saved from the group sheet)
    """
    sheetrows = list(sheet.rows)
    # Collapse each row to its non-empty cell values only; column positions
    # therefore shift left wherever the sheet has blank filler cells.
    rows = [[cell.value for cell in row if cell.value != None] for row in sheetrows]
    group = ""
    i = 0
    while i < len(rows):
        row = rows[i]
        i += 1
        # Rows with fewer than 3 values are spacers, not group headers.
        if len(row) < 3:
            continue
        # Header row starts a group block; e.g. "Group A" -> group "A".
        group = row[0].partition(" ")[2]
        if final:
            group = "final"
        try:
            # Player names sit between the "VS" and "Score" header cells.
            start = row.index("VS") + 1
            end = row.index("Score")
            # presumably compensates for an extra leading column in sheets
            # whose top-left cell is blank — TODO confirm against a real sheet
            if sheetrows[0][0].value == None: # missing text in upper left cell
                start += 1
                end += 1
        except ValueError:
            # Headers absent: assume players occupy columns 2..end of the row.
            start = 2
            end = len(row)
        players = row[start:end]
        # One result row per player follows the header row.
        for _ in range(len(players)):
            row = rows[i]
            i += 1
            player = row[0]
            if len(player) > 50 and player[0] == "B":
                player = "Beefman" # messy unicode
            if player == "IGNORE WAS MISTAKE":
                continue
            # Second cell lists the player's three cards, newline-separated.
            cards = row[1].split("\n")
            if not final: #already done
                cur.execute("insert into deck values (?,?,?,?,?);", (n, player, clean_card(cards[0]), clean_card(cards[1]), clean_card(cards[2])))
            # Scores line up positionally with the players header row;
            # "?" or empty means the match wasn't played -> score 0.
            for o, s in enumerate(row[start:end]):
                if s == "?" or s == "":
                    s = 0
                cur.execute("insert into match values (?, ?, ?, ?, ?);",
                            (n, group, player,players[o],int(s)));
def save_round(n, fileid):
    """Download round *n*'s spreadsheet and persist its decks, matches and metadata."""
    wb = file_to_wb(fileid)
    # Sheet naming drifted between rounds: try "Groups" first, then "Group".
    try:
        groups = wb["Groups"]
    except KeyError:
        groups = wb["Group"]
    # Round date = Drive file creation date (may be None on API failure).
    date_str = get_file_created_date(fileid)
    cur = con.cursor()
    save_sheet(cur, groups, n, False)
    # A second sheet, when present, holds the finals bracket.
    if len(wb._sheets) > 1:
        save_sheet(cur, wb["Final Group"], n, True)
    cur.execute("insert into round values (?, ?, ?);", (n, fileid, date_str))
    con.commit()
# fixing some data issues
def best_guess(card):
    """Map a misspelled card name to the closest real card name.

    A real card that *card* is a prefix of wins immediately; otherwise the
    nearest name within Levenshtein distance 5 is chosen.  Returns *card*
    unchanged when nothing is close enough.
    """
    best_distance = 6
    best_match = ""
    for candidate in allcards:
        if candidate.startswith(card):
            return candidate
        d = distance(card, candidate, score_cutoff=5)
        if d < best_distance:
            best_distance = d
            best_match = candidate
    if not best_match:
        return card
    print(f"replacing {card} to {best_match}")
    return best_match
# Manual fixes for player-submitted card names (keys are the post-titlecase,
# printable-filtered form that clean_card() produces).  Two dead duplicate
# keys from the original were removed — in a dict literal the later duplicate
# wins, so the shadowed earlier entries never applied:
#   "Yera and Oski, Weaver and Guide" -> self (shadowed by the Arachne mapping)
#   "Fizik, Etherium Mechanic" -> "Iron Spider, Stark Upgrade" (shadowed by the self-mapping)
replacement = {
    "You Guessed It, Magus of the Moooon": "Magus of the Moon",
    "Yera and Oski, Weaver and Guide": "Arachne, Psionic Weaver",
    "Witch-Blessed Meadow": "Witch's Cottage", # Wrong
    "Urborg Tomb Yawgy": "Urborg, Tomb of Yawgmoth",
    "Urborg (The One That Makes All Lands Swamps)": "Urborg, Tomb of Yawgmoth",
    "Ulamog the Infinite Gyre (Borderless) (Foil)": "Ulamog, the Infinite Gyre",
    "That One Wurm That Makes the Three 5/5s When It Dies I Have Done Too Many Scryfall Searches Today Sorry": "TBD",
    "Thallid Oh Yeah": "Thallid",
    # fixed: original value misspelled the real card as "...Pendrell Value"
    "Tabernacle at Penrall Vale": "The Tabernacle at Pendrell Vale",
    "Swamp (XLN 270)": "Swamp",
    "Swamp (DOM #258)": "Swamp",
    "Swamp (BRB 18)": "Swamp",
    "Swamp (8ED #339)": "Swamp",
    "Specter's Shriek (Plz Let It Get Banned This Time)": "Specter's Shriek",
    "Speaker of the Heavens!?!?!?!": "Speaker of the Heavens",
    "Simispiriguide": "Simian Spirit Guide",
    "Signaling Roar": "Riling Dawnbreaker // Signaling Roar",
    "Senu, the Keen-Eyed": "Senu, Keen-Eyed Protector",
    "Restore Balanse (I Have to Misspell This Cuz the Regex Is Buggy Lmao)": "Restore Balance",
    "Red Sun's Zenith (Again)": "Red Sun's Zenith",
    "Plains (ONS 333)": "Plains",
    "Phelps, Exuberant Swimmer (Phelia)": "Phelia, Exuberant Shepherd",
    "Phelia, Tail-Wagging Shepherd": "Phelia, Exuberant Shepherd",
    "Nesumi Shortfang": "Nezumi Shortfang // Stabwhisker the Odious",
    "Mox Pearl (The One Without Metalcraft)": "Mox Pearl",
    "Monty Python and the Holy Grail Black Knight (Oathsworn Knight)": "Oathsworn Knight",
    "Miku, Divine Diva": "Elspeth Tirel",
    "Mayor of Avarbruck": "Mayor of Avabruck // Howlpack Alpha",
    "Magus of the Mooooooooon": "Magus of the Moon",
    "Lion's Eye Diamond Cheatyface": "Lion's Eye Diamond",
    "Kytheon Hero of Akros": "Kytheon, Hero of Akros // Gideon, Battle-Forged",
    "Karaka (Its Listed as Unbanned but the Form Wont Let Me Submit It Idk Its My First Time Lol)": "Karakas",
    "Island [KLD #395]": "Island",
    "Island (ONE #273)": "Island",
    "Inky Mothy Nexy": "Inkmoth Nexus",
    "Ink Mothy Nexy": "Inkmoth Nexus",
    "Gavel of the Righteous, for I Am Stubborn and Naively Hopeful": "Gavel of the Righteous",
    "Gargadon (Neither Greater Nor Lesser)": "Gargadon",
    "Forge-Chan": "Chancellor of the Forge",
    "Forest!!!!!!": "Forest",
    "Forest 🌲": "Forest",
    "Forest [LCI #402]": "Forest",
    "Forest (Tempest #348)": "Forest",
    # self-mapping kept to preserve current behavior (no replacement applied)
    "Fizik, Etherium Mechanic": "Fizik, Etherium Mechanic",
    "Filigree Sylex": "The Filigree Sylex",
    "Elspeth Suns Champion Wooooooo": "Elspeth, Sun's Champion",
    "Dwarven Hold (Again)": "Dwarven Hold",
    "Dreams of Oil and Steel": "Dreams of Steel and Oil",
    "Dark Ritual (STA #89)": "Dark Ritual",
    "Dark Ritual (BTD 21)": "Dark Ritual",
    "Chronomatonton (The 1 Cost 1/1 That Taps to Get Bigger)": "Chronomaton",
    "Chaplain of Arms": "Chaplain of Alms // Chapel Shieldgeist",
    "Chancelor of the Tangle (Sic)": "Chancellor of the Tangle",
    "Bottomless Depths": "Bottomless Vault", # wrong?
    "Boseiju, Who Destroys Target Artifact, Enchantment, or Nonbasic Land (Who Endures)": "Boseiju, Who Endures",
    "Bayou - Not Legal": "Bayou",
    "Basic Plains": "Plains",
    "Azorius Guildgate Anniversary": "Azorius Guildgate",
    "Azorius Chancery First": "Azorius Chancery",
    "Annex-Chan": "Chancellor of the Annex",
    "Annex Chan": "Chancellor of the Annex",
}
def clean_card(card_name):
    """Normalize a player-submitted card name to a canonical card name."""
    # Title-case, drop non-printable characters, normalize curly apostrophes.
    name = titlecase.titlecase(card_name.strip())
    name = ''.join(ch for ch in name if ch in string.printable)
    name = name.replace("’", "'")
    # Apply the manual replacement table, then fuzzy-match anything still unknown.
    name = replacement.get(name) or name
    if name in allcards:
        return name
    return best_guess(name)
# Local cache of the MTGJSON AtomicCards dump.
cache_file = "AtomicCards.json"
# update the db with all valid mtg cards
def update_mtg_db():
    """Download MTGJSON's AtomicCards into cache_file unless a <30-day-old copy exists."""
    age = None
    if os.path.exists(cache_file):
        age = datetime.now() - datetime.fromtimestamp(os.path.getmtime(cache_file))
    if age is not None and age <= timedelta(days=30):
        return  # cached copy is fresh enough
    print("Updating AtomicCards (~100MB)")
    cards = requests.get("https://mtgjson.com/api/v5/AtomicCards.json")
    # was: no status check — a failed download would silently cache an error page
    cards.raise_for_status()
    # Validate the payload parses, but write the raw text instead of
    # re-serializing the ~100MB structure through json.dump.
    cards.json()
    with open(cache_file, 'w') as f:
        f.write(cards.text)
def populate_mtg_table():
    """Reload the `mtg` table (name, cost, type) from the cached AtomicCards file."""
    with open(cache_file, 'r') as f:
        cards_data = json.load(f)
    cur = con.cursor()
    cur.execute("delete from mtg")
    print("Populating mtg table...")
    # (removed unused local `count = 0` from the original)
    for card_name, card_variants in cards_data.get('data', {}).items():
        if not card_variants:
            continue
        # Variants differ by printing; the first carries the shared fields.
        card = card_variants[0]
        name = card.get('name', card_name)
        cost = card.get('manaCost', '')
        card_type = " ".join(card.get('types', ''))
        cur.execute("insert into mtg values (?, ?, ?)", (name, cost, card_type))
    con.commit()
if __name__ == "__main__":
    # Full refresh, in dependency order: schema, card db, banlist, rounds.
    for step in (load_schema, update_mtg_db, populate_mtg_table, update_bans, run_rounds):
        step()