See above. If the dictionary gets very large, that approach would be slow. It really depends on the size of the dictionary versus the word length. In the more realistic case of English words, the maximum word length is quite limited, while the dictionary can get large.
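For intuition, here is a minimal sketch contrasting the two ways of finding neighbors (assuming lowercase a-z words of equal length; the helper names neighbors_by_scan and neighbors_by_mutation are mine, not from the solutions below): scanning the whole dictionary costs roughly O(n * l) per word, while mutating each position through the alphabet costs about O(26 * l) set lookups per word, so which one wins depends on dictionary size n versus word length l.

from typing import List, Set

def neighbors_by_scan(word: str, dictionary: List[str]) -> List[str]:
    # O(n * l): compare word against every dictionary entry.
    return [w for w in dictionary
            if sum(a != b for a, b in zip(word, w)) == 1]

def neighbors_by_mutation(word: str, dictionary: Set[str]) -> List[str]:
    # O(26 * l) lookups: substitute every letter at every position.
    result = []
    for i, original in enumerate(word):
        for c in 'abcdefghijklmnopqrstuvwxyz':
            if c != original:
                candidate = word[:i] + c + word[i + 1:]
                if candidate in dictionary:
                    result.append(candidate)
    return result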
Wouldn’t the time complexity be O((m + n) * l)? (Where l is the length of each string.)
What about this solution?
from collections import defaultdict, deque
from typing import Dict, List

def word_ladder(begin: str, end: str, word_list: List[str]) -> int:
    if end not in word_list:
        return -1

    def build_patterns(words: List[str]) -> Dict[str, List[str]]:
        # Map each wildcard pattern (e.g. "h*t") to the words matching it.
        patterns = defaultdict(list)
        for word in words:
            for i in range(len(word)):
                pattern = word[:i] + "*" + word[i + 1:]
                patterns[pattern].append(word)
        return patterns

    def get_neighbors(word: str, patterns: Dict[str, List[str]]) -> List[str]:
        neighbors = []
        for i in range(len(word)):
            pattern = word[:i] + "*" + word[i + 1:]
            neighbors.extend(patterns[pattern])
        return neighbors

    steps = 0
    queue = deque([begin])
    visited = set([begin])
    patterns = build_patterns(word_list)
    while queue:
        for _ in range(len(queue)):
            word = queue.popleft()
            if word == end:
                return steps
            for neighbor in get_neighbors(word, patterns):
                if neighbor in visited:
                    continue
                visited.add(neighbor)
                queue.append(neighbor)
        steps += 1
    return -1
My solution using the BFS template:
from collections import deque
from typing import List

class Solution:
    def ladderLength(self, beginWord: str, endWord: str, words: List[str]) -> int:
        if endWord not in words:
            return 0
        wordList = set(words)

        def get_neighbors(pattern):
            # Try every single-character substitution and keep those in the word set.
            res = []
            for c in 'abcdefghijklmnopqrstuvwxyz':
                for i in range(len(pattern)):
                    current_text = pattern[:i] + c + pattern[i + 1:]
                    if current_text in wordList:
                        res.append(current_text)
            return res

        visited = set()

        def bfs(root):
            queue = deque([root])
            while queue:
                pattern, moves = queue.popleft()
                if pattern == endWord:
                    return moves + 1
                for neighbor in get_neighbors(pattern):
                    if neighbor in visited:
                        continue
                    queue.append((neighbor, moves + 1))
                    visited.add(neighbor)
            return 0

        return bfs((beginWord, 0))
from collections import deque, defaultdict
from typing import List

def is_neighbor(word_a, word_b):
    # Words are assumed to be the same length; neighbors differ in exactly one position.
    count = 0
    for char_a, char_b in zip(word_a, word_b):
        if char_a != char_b:
            count += 1
    return count == 1

def word_ladder(begin: str, end: str, word_list: List[str]) -> int:
    # Build the adjacency list up front (assumes begin is in word_list).
    word_to_neighbors = defaultdict(list)
    for word_a in word_list:
        for word_b in word_list:
            if is_neighbor(word_a, word_b):
                word_to_neighbors[word_a].append(word_b)

    queue = deque([begin])
    steps = 0
    visited = set([begin])
    while queue:
        n = len(queue)
        for _ in range(n):
            word = queue.popleft()
            for neighbor_word in word_to_neighbors[word]:
                if neighbor_word in visited:
                    continue
                visited.add(neighbor_word)
                if neighbor_word == end:
                    return steps + 1
                queue.append(neighbor_word)
        steps += 1
    return -1
Could you elaborate on the time complexity here? I’m confused as to what m even refers to in this case.
My solution is more efficient since the graph gets built only once and graph lookups are O(1) read operations; a quick usage example follows the code.
from collections import deque
from typing import List

def word_ladder(begin: str, end: str, word_list: List[str]) -> int:
    def build_graph():
        # Build the adjacency list once; each word links to every dictionary
        # word that differs from it by exactly one character.
        all_chars = [chr(x) for x in range(ord('a'), ord('z') + 1)]
        graph = {word: set() for word in word_list}
        for word in graph:
            for index in range(len(word)):
                for ch in all_chars:
                    other_word = word[:index] + ch + word[index + 1:]
                    if other_word != word and other_word in graph:
                        graph[other_word].add(word)
                        graph[word].add(other_word)
        return graph

    graph = build_graph()

    def bfs():
        # Assumes begin is present in word_list (and hence in the graph).
        steps = 0
        q = deque([begin])
        seen = set([begin])
        while q:
            n = len(q)
            for _ in range(n):
                word = q.popleft()
                if word == end:
                    return steps
                for linked_word in graph[word]:
                    if linked_word in seen:
                        continue
                    q.append(linked_word)
                    seen.add(linked_word)
            steps += 1
        return -1

    return bfs()
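A quick usage sketch of the function above (the sample word list and expected value are my own illustration, assuming begin and end both appear in word_list):

words = ["hit", "hot", "dot", "dog", "cog", "lot", "log"]
# "hit" -> "hot" -> "dot" -> "dog" -> "cog" takes 4 transformations.
print(word_ladder("hit", "cog", words))  # expected output: 4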